Vulkan Memory Allocator
vk_mem_alloc.h
Go to the documentation of this file.
1 //
2 // Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
26 #ifdef __cplusplus
27 extern "C" {
28 #endif
29 
1618 /*
1619 Define this macro to 0/1 to disable/enable support for recording functionality,
1620 available through VmaAllocatorCreateInfo::pRecordSettings.
1621 */
1622 #ifndef VMA_RECORDING_ENABLED
1623  #ifdef _WIN32
1624  #define VMA_RECORDING_ENABLED 1
1625  #else
1626  #define VMA_RECORDING_ENABLED 0
1627  #endif
1628 #endif
1629 
1630 #ifndef NOMINMAX
1631  #define NOMINMAX // For windows.h
1632 #endif
1633 
1634 #ifndef VULKAN_H_
1635  #include <vulkan/vulkan.h>
1636 #endif
1637 
1638 #if VMA_RECORDING_ENABLED
1639  #include <windows.h>
1640 #endif
1641 
1642 #if !defined(VMA_DEDICATED_ALLOCATION)
1643  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
1644  #define VMA_DEDICATED_ALLOCATION 1
1645  #else
1646  #define VMA_DEDICATED_ALLOCATION 0
1647  #endif
1648 #endif
1649 
1659 VK_DEFINE_HANDLE(VmaAllocator)
1660 
1661 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
1663  VmaAllocator allocator,
1664  uint32_t memoryType,
1665  VkDeviceMemory memory,
1666  VkDeviceSize size);
1668 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
1669  VmaAllocator allocator,
1670  uint32_t memoryType,
1671  VkDeviceMemory memory,
1672  VkDeviceSize size);
1673 
1687 
1717 
1720 typedef VkFlags VmaAllocatorCreateFlags;
1721 
1726 typedef struct VmaVulkanFunctions {
1727  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
1728  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
1729  PFN_vkAllocateMemory vkAllocateMemory;
1730  PFN_vkFreeMemory vkFreeMemory;
1731  PFN_vkMapMemory vkMapMemory;
1732  PFN_vkUnmapMemory vkUnmapMemory;
1733  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
1734  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
1735  PFN_vkBindBufferMemory vkBindBufferMemory;
1736  PFN_vkBindImageMemory vkBindImageMemory;
1737  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
1738  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
1739  PFN_vkCreateBuffer vkCreateBuffer;
1740  PFN_vkDestroyBuffer vkDestroyBuffer;
1741  PFN_vkCreateImage vkCreateImage;
1742  PFN_vkDestroyImage vkDestroyImage;
1743  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
1744 #if VMA_DEDICATED_ALLOCATION
1745  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
1746  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
1747 #endif
1749 
1751 typedef enum VmaRecordFlagBits {
1758 
1761 typedef VkFlags VmaRecordFlags;
1762 
1764 typedef struct VmaRecordSettings
1765 {
1775  const char* pFilePath;
1777 
1780 {
1784 
1785  VkPhysicalDevice physicalDevice;
1787 
1788  VkDevice device;
1790 
1793 
1794  const VkAllocationCallbacks* pAllocationCallbacks;
1796 
1836  const VkDeviceSize* pHeapSizeLimit;
1857 
1859 VkResult vmaCreateAllocator(
1860  const VmaAllocatorCreateInfo* pCreateInfo,
1861  VmaAllocator* pAllocator);
1862 
1864 void vmaDestroyAllocator(
1865  VmaAllocator allocator);
1866 
1872  VmaAllocator allocator,
1873  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
1874 
1880  VmaAllocator allocator,
1881  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
1882 
1890  VmaAllocator allocator,
1891  uint32_t memoryTypeIndex,
1892  VkMemoryPropertyFlags* pFlags);
1893 
1903  VmaAllocator allocator,
1904  uint32_t frameIndex);
1905 
1908 typedef struct VmaStatInfo
1909 {
1911  uint32_t blockCount;
1917  VkDeviceSize usedBytes;
1919  VkDeviceSize unusedBytes;
1922 } VmaStatInfo;
1923 
1925 typedef struct VmaStats
1926 {
1927  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
1928  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
1930 } VmaStats;
1931 
1933 void vmaCalculateStats(
1934  VmaAllocator allocator,
1935  VmaStats* pStats);
1936 
1937 #define VMA_STATS_STRING_ENABLED 1
1938 
1939 #if VMA_STATS_STRING_ENABLED
1940 
1942 
1944 void vmaBuildStatsString(
1945  VmaAllocator allocator,
1946  char** ppStatsString,
1947  VkBool32 detailedMap);
1948 
1949 void vmaFreeStatsString(
1950  VmaAllocator allocator,
1951  char* pStatsString);
1952 
1953 #endif // #if VMA_STATS_STRING_ENABLED
1954 
1963 VK_DEFINE_HANDLE(VmaPool)
1964 
1965 typedef enum VmaMemoryUsage
1966 {
2015 } VmaMemoryUsage;
2016 
2031 
2086 
2102 
2112 
2119 
2123 
2125 {
2138  VkMemoryPropertyFlags requiredFlags;
2143  VkMemoryPropertyFlags preferredFlags;
2151  uint32_t memoryTypeBits;
2164  void* pUserData;
2166 
2183 VkResult vmaFindMemoryTypeIndex(
2184  VmaAllocator allocator,
2185  uint32_t memoryTypeBits,
2186  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2187  uint32_t* pMemoryTypeIndex);
2188 
2202  VmaAllocator allocator,
2203  const VkBufferCreateInfo* pBufferCreateInfo,
2204  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2205  uint32_t* pMemoryTypeIndex);
2206 
2220  VmaAllocator allocator,
2221  const VkImageCreateInfo* pImageCreateInfo,
2222  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2223  uint32_t* pMemoryTypeIndex);
2224 
2245 
2262 
2273 
2279 
2282 typedef VkFlags VmaPoolCreateFlags;
2283 
2286 typedef struct VmaPoolCreateInfo {
2301  VkDeviceSize blockSize;
2330 
2333 typedef struct VmaPoolStats {
2336  VkDeviceSize size;
2339  VkDeviceSize unusedSize;
2352  VkDeviceSize unusedRangeSizeMax;
2355  size_t blockCount;
2356 } VmaPoolStats;
2357 
2364 VkResult vmaCreatePool(
2365  VmaAllocator allocator,
2366  const VmaPoolCreateInfo* pCreateInfo,
2367  VmaPool* pPool);
2368 
2371 void vmaDestroyPool(
2372  VmaAllocator allocator,
2373  VmaPool pool);
2374 
2381 void vmaGetPoolStats(
2382  VmaAllocator allocator,
2383  VmaPool pool,
2384  VmaPoolStats* pPoolStats);
2385 
2393  VmaAllocator allocator,
2394  VmaPool pool,
2395  size_t* pLostAllocationCount);
2396 
2411 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
2412 
2437 VK_DEFINE_HANDLE(VmaAllocation)
2438 
2439 
2441 typedef struct VmaAllocationInfo {
2446  uint32_t memoryType;
2455  VkDeviceMemory deviceMemory;
2460  VkDeviceSize offset;
2465  VkDeviceSize size;
2479  void* pUserData;
2481 
2492 VkResult vmaAllocateMemory(
2493  VmaAllocator allocator,
2494  const VkMemoryRequirements* pVkMemoryRequirements,
2495  const VmaAllocationCreateInfo* pCreateInfo,
2496  VmaAllocation* pAllocation,
2497  VmaAllocationInfo* pAllocationInfo);
2498 
2506  VmaAllocator allocator,
2507  VkBuffer buffer,
2508  const VmaAllocationCreateInfo* pCreateInfo,
2509  VmaAllocation* pAllocation,
2510  VmaAllocationInfo* pAllocationInfo);
2511 
2513 VkResult vmaAllocateMemoryForImage(
2514  VmaAllocator allocator,
2515  VkImage image,
2516  const VmaAllocationCreateInfo* pCreateInfo,
2517  VmaAllocation* pAllocation,
2518  VmaAllocationInfo* pAllocationInfo);
2519 
2521 void vmaFreeMemory(
2522  VmaAllocator allocator,
2523  VmaAllocation allocation);
2524 
2545 VkResult vmaResizeAllocation(
2546  VmaAllocator allocator,
2547  VmaAllocation allocation,
2548  VkDeviceSize newSize);
2549 
2567  VmaAllocator allocator,
2568  VmaAllocation allocation,
2569  VmaAllocationInfo* pAllocationInfo);
2570 
2585 VkBool32 vmaTouchAllocation(
2586  VmaAllocator allocator,
2587  VmaAllocation allocation);
2588 
2603  VmaAllocator allocator,
2604  VmaAllocation allocation,
2605  void* pUserData);
2606 
2618  VmaAllocator allocator,
2619  VmaAllocation* pAllocation);
2620 
2655 VkResult vmaMapMemory(
2656  VmaAllocator allocator,
2657  VmaAllocation allocation,
2658  void** ppData);
2659 
2664 void vmaUnmapMemory(
2665  VmaAllocator allocator,
2666  VmaAllocation allocation);
2667 
2680 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2681 
2694 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
2695 
2712 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
2713 
2720 VK_DEFINE_HANDLE(VmaDefragmentationContext)
2721 
2722 typedef enum VmaDefragmentationFlagBits {
2726 typedef VkFlags VmaDefragmentationFlags;
2727 
2732 typedef struct VmaDefragmentationInfo2 {
2756  uint32_t poolCount;
2777  VkDeviceSize maxCpuBytesToMove;
2787  VkDeviceSize maxGpuBytesToMove;
2801  VkCommandBuffer commandBuffer;
2803 
2808 typedef struct VmaDefragmentationInfo {
2813  VkDeviceSize maxBytesToMove;
2820 
2822 typedef struct VmaDefragmentationStats {
2824  VkDeviceSize bytesMoved;
2826  VkDeviceSize bytesFreed;
2832 
2859 VkResult vmaDefragmentationBegin(
2860  VmaAllocator allocator,
2861  const VmaDefragmentationInfo2* pInfo,
2862  VmaDefragmentationStats* pStats,
2863  VmaDefragmentationContext *pContext);
2864 
2870 VkResult vmaDefragmentationEnd(
2871  VmaAllocator allocator,
2872  VmaDefragmentationContext context);
2873 
2914 VkResult vmaDefragment(
2915  VmaAllocator allocator,
2916  VmaAllocation* pAllocations,
2917  size_t allocationCount,
2918  VkBool32* pAllocationsChanged,
2919  const VmaDefragmentationInfo *pDefragmentationInfo,
2920  VmaDefragmentationStats* pDefragmentationStats);
2921 
2934 VkResult vmaBindBufferMemory(
2935  VmaAllocator allocator,
2936  VmaAllocation allocation,
2937  VkBuffer buffer);
2938 
2951 VkResult vmaBindImageMemory(
2952  VmaAllocator allocator,
2953  VmaAllocation allocation,
2954  VkImage image);
2955 
2982 VkResult vmaCreateBuffer(
2983  VmaAllocator allocator,
2984  const VkBufferCreateInfo* pBufferCreateInfo,
2985  const VmaAllocationCreateInfo* pAllocationCreateInfo,
2986  VkBuffer* pBuffer,
2987  VmaAllocation* pAllocation,
2988  VmaAllocationInfo* pAllocationInfo);
2989 
3001 void vmaDestroyBuffer(
3002  VmaAllocator allocator,
3003  VkBuffer buffer,
3004  VmaAllocation allocation);
3005 
3007 VkResult vmaCreateImage(
3008  VmaAllocator allocator,
3009  const VkImageCreateInfo* pImageCreateInfo,
3010  const VmaAllocationCreateInfo* pAllocationCreateInfo,
3011  VkImage* pImage,
3012  VmaAllocation* pAllocation,
3013  VmaAllocationInfo* pAllocationInfo);
3014 
3026 void vmaDestroyImage(
3027  VmaAllocator allocator,
3028  VkImage image,
3029  VmaAllocation allocation);
3030 
3031 #ifdef __cplusplus
3032 }
3033 #endif
3034 
3035 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3036 
3037 // For Visual Studio IntelliSense.
3038 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3039 #define VMA_IMPLEMENTATION
3040 #endif
3041 
3042 #ifdef VMA_IMPLEMENTATION
3043 #undef VMA_IMPLEMENTATION
3044 
3045 #include <cstdint>
3046 #include <cstdlib>
3047 #include <cstring>
3048 
3049 /*******************************************************************************
3050 CONFIGURATION SECTION
3051 
3052 Define some of these macros before each #include of this header or change them
3053 here if you need other than the default behavior depending on your environment.
3054 */
3055 
3056 /*
3057 Define this macro to 1 to make the library fetch pointers to Vulkan functions
3058 internally, like:
3059 
3060  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
3061 
3062 Define to 0 if you are going to provide your own pointers to Vulkan functions via
3063 VmaAllocatorCreateInfo::pVulkanFunctions.
3064 */
3065 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
3066 #define VMA_STATIC_VULKAN_FUNCTIONS 1
3067 #endif
3068 
3069 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
3070 //#define VMA_USE_STL_CONTAINERS 1
3071 
3072 /* Set this macro to 1 to make the library including and using STL containers:
3073 std::pair, std::vector, std::list, std::unordered_map.
3074 
3075 Set it to 0 or undefined to make the library using its own implementation of
3076 the containers.
3077 */
3078 #if VMA_USE_STL_CONTAINERS
3079  #define VMA_USE_STL_VECTOR 1
3080  #define VMA_USE_STL_UNORDERED_MAP 1
3081  #define VMA_USE_STL_LIST 1
3082 #endif
3083 
3084 #ifndef VMA_USE_STL_SHARED_MUTEX
3085  // Minimum Visual Studio 2015 Update 2
3086  #if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918
3087  #define VMA_USE_STL_SHARED_MUTEX 1
3088  #endif
3089 #endif
3090 
3091 #if VMA_USE_STL_VECTOR
3092  #include <vector>
3093 #endif
3094 
3095 #if VMA_USE_STL_UNORDERED_MAP
3096  #include <unordered_map>
3097 #endif
3098 
3099 #if VMA_USE_STL_LIST
3100  #include <list>
3101 #endif
3102 
3103 /*
3104 Following headers are used in this CONFIGURATION section only, so feel free to
3105 remove them if not needed.
3106 */
3107 #include <cassert> // for assert
3108 #include <algorithm> // for min, max
3109 #include <mutex>
3110 #include <atomic> // for std::atomic
3111 
3112 #ifndef VMA_NULL
3113  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
3114  #define VMA_NULL nullptr
3115 #endif
3116 
#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
// Fallback aligned_alloc for old Android (API level < 16), where the C11
// function is not provided by the NDK libc. Implemented on top of memalign().
// Memory returned here is released with ordinary free().
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__)
#include <cstdlib>
// Fallback aligned_alloc for Apple platforms and newer Android, implemented
// with posix_memalign(). Returns VMA_NULL on allocation failure.
void *aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#endif
3145 
3146 // If your compiler is not compatible with C++11 and definition of
3147 // aligned_alloc() function is missing, uncommenting the following line may help:
3148 
3149 //#include <malloc.h>
3150 
3151 // Normal assert to check for programmer's errors, especially in Debug configuration.
3152 #ifndef VMA_ASSERT
3153  #ifdef _DEBUG
3154  #define VMA_ASSERT(expr) assert(expr)
3155  #else
3156  #define VMA_ASSERT(expr)
3157  #endif
3158 #endif
3159 
3160 // Assert that will be called very often, like inside data structures e.g. operator[].
3161 // Making it non-empty can make program slow.
3162 #ifndef VMA_HEAVY_ASSERT
3163  #ifdef _DEBUG
3164  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
3165  #else
3166  #define VMA_HEAVY_ASSERT(expr)
3167  #endif
3168 #endif
3169 
3170 #ifndef VMA_ALIGN_OF
3171  #define VMA_ALIGN_OF(type) (__alignof(type))
3172 #endif
3173 
3174 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
3175  #if defined(_WIN32)
3176  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
3177  #else
3178  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
3179  #endif
3180 #endif
3181 
3182 #ifndef VMA_SYSTEM_FREE
3183  #if defined(_WIN32)
3184  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
3185  #else
3186  #define VMA_SYSTEM_FREE(ptr) free(ptr)
3187  #endif
3188 #endif
3189 
3190 #ifndef VMA_MIN
3191  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
3192 #endif
3193 
3194 #ifndef VMA_MAX
3195  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
3196 #endif
3197 
3198 #ifndef VMA_SWAP
3199  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
3200 #endif
3201 
3202 #ifndef VMA_SORT
3203  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
3204 #endif
3205 
3206 #ifndef VMA_DEBUG_LOG
3207  #define VMA_DEBUG_LOG(format, ...)
3208  /*
3209  #define VMA_DEBUG_LOG(format, ...) do { \
3210  printf(format, __VA_ARGS__); \
3211  printf("\n"); \
3212  } while(false)
3213  */
3214 #endif
3215 
3216 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
3217 #if VMA_STATS_STRING_ENABLED
3218  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
3219  {
3220  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
3221  }
3222  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
3223  {
3224  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
3225  }
3226  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
3227  {
3228  snprintf(outStr, strLen, "%p", ptr);
3229  }
3230 #endif
3231 
3232 #ifndef VMA_MUTEX
3233  class VmaMutex
3234  {
3235  public:
3236  void Lock() { m_Mutex.lock(); }
3237  void Unlock() { m_Mutex.unlock(); }
3238  private:
3239  std::mutex m_Mutex;
3240  };
3241  #define VMA_MUTEX VmaMutex
3242 #endif
3243 
3244 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
3245 #ifndef VMA_RW_MUTEX
3246  #if VMA_USE_STL_SHARED_MUTEX
3247  // Use std::shared_mutex from C++17.
3248  #include <shared_mutex>
3249  class VmaRWMutex
3250  {
3251  public:
3252  void LockRead() { m_Mutex.lock_shared(); }
3253  void UnlockRead() { m_Mutex.unlock_shared(); }
3254  void LockWrite() { m_Mutex.lock(); }
3255  void UnlockWrite() { m_Mutex.unlock(); }
3256  private:
3257  std::shared_mutex m_Mutex;
3258  };
3259  #define VMA_RW_MUTEX VmaRWMutex
3260  #elif defined(_WIN32)
3261  // Use SRWLOCK from WinAPI.
3262  class VmaRWMutex
3263  {
3264  public:
3265  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
3266  void LockRead() { AcquireSRWLockShared(&m_Lock); }
3267  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
3268  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
3269  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
3270  private:
3271  SRWLOCK m_Lock;
3272  };
3273  #define VMA_RW_MUTEX VmaRWMutex
3274  #else
3275  // Less efficient fallback: Use normal mutex.
3276  class VmaRWMutex
3277  {
3278  public:
3279  void LockRead() { m_Mutex.Lock(); }
3280  void UnlockRead() { m_Mutex.Unlock(); }
3281  void LockWrite() { m_Mutex.Lock(); }
3282  void UnlockWrite() { m_Mutex.Unlock(); }
3283  private:
3284  VMA_MUTEX m_Mutex;
3285  };
3286  #define VMA_RW_MUTEX VmaRWMutex
3287  #endif // #if VMA_USE_STL_SHARED_MUTEX
3288 #endif // #ifndef VMA_RW_MUTEX
3289 
3290 /*
3291 If providing your own implementation, you need to implement a subset of std::atomic:
3292 
3293 - Constructor(uint32_t desired)
3294 - uint32_t load() const
3295 - void store(uint32_t desired)
3296 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
3297 */
3298 #ifndef VMA_ATOMIC_UINT32
3299  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
3300 #endif
3301 
3302 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
3303 
3307  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
3308 #endif
3309 
3310 #ifndef VMA_DEBUG_ALIGNMENT
3311 
3315  #define VMA_DEBUG_ALIGNMENT (1)
3316 #endif
3317 
3318 #ifndef VMA_DEBUG_MARGIN
3319 
3323  #define VMA_DEBUG_MARGIN (0)
3324 #endif
3325 
3326 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
3327 
3331  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
3332 #endif
3333 
3334 #ifndef VMA_DEBUG_DETECT_CORRUPTION
3335 
3340  #define VMA_DEBUG_DETECT_CORRUPTION (0)
3341 #endif
3342 
3343 #ifndef VMA_DEBUG_GLOBAL_MUTEX
3344 
3348  #define VMA_DEBUG_GLOBAL_MUTEX (0)
3349 #endif
3350 
3351 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
3352 
3356  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
3357 #endif
3358 
3359 #ifndef VMA_SMALL_HEAP_MAX_SIZE
3360  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
3362 #endif
3363 
3364 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
3365  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
3367 #endif
3368 
3369 #ifndef VMA_CLASS_NO_COPY
3370  #define VMA_CLASS_NO_COPY(className) \
3371  private: \
3372  className(const className&) = delete; \
3373  className& operator=(const className&) = delete;
3374 #endif
3375 
3376 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
3377 
3378 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
3379 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
3380 
3381 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
3382 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
3383 
3384 /*******************************************************************************
3385 END OF CONFIGURATION
3386 */
3387 
3388 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
3389 
3390 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
3391  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
3392 
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    // Kernighan's method: v &= v - 1 clears the lowest set bit, so the
    // loop runs exactly once per set bit. Same result as the parallel
    // (SWAR) popcount, just written differently.
    uint32_t count = 0;
    while(v != 0)
    {
        v &= v - 1;
        ++count;
    }
    return count;
}
3403 
// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T align)
{
    // Integer division truncates, so adding (align - 1) first rounds the
    // quotient up. Works for any positive align, not only powers of 2.
    const T roundedQuotient = (val + align - 1) / align;
    return roundedQuotient * align;
}
// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
// (Fixed comment: it previously said "VmaAlignUp(11, 8) = 8", a copy-paste typo.)
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T align)
{
    // Truncating integer division drops the remainder, rounding toward zero.
    return val / align * align;
}
3418 
// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    // Adding half of the divisor before truncating division rounds the
    // result to nearest (ties round up for nonnegative operands).
    const T halfDivisor = y / (T)2;
    return (x + halfDivisor) / y;
}
3425 
/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    // A power of two has exactly one bit set; x - 1 flips that bit and
    // every bit below it, so the AND is zero. Note 0 also yields true.
    const T lowerBits = (T)(x - 1);
    return (x & lowerBits) == 0;
}
3436 
// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    // Decrement, smear the highest set bit into every lower position,
    // then increment: the all-ones suffix carries into the next power
    // of 2. An input that is already a power of 2 is returned unchanged.
    uint32_t x = v - 1;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    return x + 1;
}
// 64-bit overload: smallest power of 2 greater or equal to v.
static inline uint64_t VmaNextPow2(uint64_t v)
{
    // Same bit-smearing trick as the 32-bit version, with one extra
    // shift to cover the upper 32 bits.
    uint64_t x = v - 1;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x |= x >> 32;
    return x + 1;
}
3461 
// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    // Smear the highest set bit downward so the value becomes a run of
    // ones, then XOR with itself shifted right by one to isolate the
    // topmost bit. For v == 0 the result is 0.
    uint32_t x = v;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    return x ^ (x >> 1);
}
// 64-bit overload: largest power of 2 less or equal to v.
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    // Same smear-and-isolate technique as the 32-bit version, with one
    // extra shift for the upper 32 bits.
    uint64_t x = v;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
    x |= x >> 32;
    return x ^ (x >> 1);
}
3484 
3485 static inline bool VmaStrIsEmpty(const char* pStr)
3486 {
3487  return pStr == VMA_NULL || *pStr == '\0';
3488 }
3489 
3490 static const char* VmaAlgorithmToStr(uint32_t algorithm)
3491 {
3492  switch(algorithm)
3493  {
3495  return "Linear";
3497  return "Buddy";
3498  case 0:
3499  return "Default";
3500  default:
3501  VMA_ASSERT(0);
3502  return "";
3503  }
3504 }
3505 
3506 #ifndef VMA_SORT
3507 
// Lomuto partition step for VmaQuickSort: uses the last element of
// [beg, end) as the pivot, moves every element that compares less than
// the pivot to the front, places the pivot at the partition point, and
// returns an iterator to it.
template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator pivot = end;
    --pivot;
    Iterator store = beg;
    for(Iterator it = beg; it < pivot; ++it)
    {
        if(cmp(*it, *pivot))
        {
            if(store != it)
            {
                VMA_SWAP(*it, *store);
            }
            ++store;
        }
    }
    if(store != pivot)
    {
        VMA_SWAP(*store, *pivot);
    }
    return store;
}
3530 
3531 template<typename Iterator, typename Compare>
3532 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
3533 {
3534  if(beg < end)
3535  {
3536  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
3537  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
3538  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
3539  }
3540 }
3541 
3542 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
3543 
3544 #endif // #ifndef VMA_SORT
3545 
3546 /*
3547 Returns true if two memory blocks occupy overlapping pages.
3548 ResourceA must be in less memory offset than ResourceB.
3549 
3550 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
3551 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
3552 */
3553 static inline bool VmaBlocksOnSamePage(
3554  VkDeviceSize resourceAOffset,
3555  VkDeviceSize resourceASize,
3556  VkDeviceSize resourceBOffset,
3557  VkDeviceSize pageSize)
3558 {
3559  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
3560  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
3561  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
3562  VkDeviceSize resourceBStart = resourceBOffset;
3563  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
3564  return resourceAEndPage == resourceBStartPage;
3565 }
3566 
// Category of resource stored in a suballocation. Used by
// VmaIsBufferImageGranularityConflict() to decide whether two neighboring
// suballocations must be separated by bufferImageGranularity.
enum VmaSuballocationType
{
    // Unused range.
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    // Allocation made without knowledge of the resource kind — treated conservatively.
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    // Image whose tiling (linear vs optimal) is not known.
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    // Forces the enum's underlying type to 32 bits, matching Vulkan convention.
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};
3577 
/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    // Order the pair so only the upper triangle of the (type1, type2)
    // combination matrix needs to be enumerated in the switch below.
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        // A free range conflicts with nothing.
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        // Could be anything, including an optimal image: assume conflict.
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        // Both optimal (type2 >= type1 after the swap): no conflict.
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}
3618 
3619 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
3620 {
3621  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
3622  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3623  for(size_t i = 0; i < numberCount; ++i, ++pDst)
3624  {
3625  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
3626  }
3627 }
3628 
3629 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
3630 {
3631  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
3632  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
3633  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
3634  {
3635  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
3636  {
3637  return false;
3638  }
3639  }
3640  return true;
3641 }
3642 
3643 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
3644 struct VmaMutexLock
3645 {
3646  VMA_CLASS_NO_COPY(VmaMutexLock)
3647 public:
3648  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex) :
3649  m_pMutex(useMutex ? &mutex : VMA_NULL)
3650  { if(m_pMutex) { m_pMutex->Lock(); } }
3651  ~VmaMutexLock()
3652  { if(m_pMutex) { m_pMutex->Unlock(); } }
3653 private:
3654  VMA_MUTEX* m_pMutex;
3655 };
3656 
3657 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
3658 struct VmaMutexLockRead
3659 {
3660  VMA_CLASS_NO_COPY(VmaMutexLockRead)
3661 public:
3662  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
3663  m_pMutex(useMutex ? &mutex : VMA_NULL)
3664  { if(m_pMutex) { m_pMutex->LockRead(); } }
3665  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
3666 private:
3667  VMA_RW_MUTEX* m_pMutex;
3668 };
3669 
3670 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
3671 struct VmaMutexLockWrite
3672 {
3673  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
3674 public:
3675  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
3676  m_pMutex(useMutex ? &mutex : VMA_NULL)
3677  { if(m_pMutex) { m_pMutex->LockWrite(); } }
3678  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
3679 private:
3680  VMA_RW_MUTEX* m_pMutex;
3681 };
3682 
3683 #if VMA_DEBUG_GLOBAL_MUTEX
3684  static VMA_MUTEX gDebugGlobalMutex;
3685  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
3686 #else
3687  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
3688 #endif
3689 
3690 // Minimum size of a free suballocation to register it in the free suballocation collection.
3691 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
3692 
/*
Performs binary search and returns iterator to first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if first argument is less than second argument.

Returned value is the found element, if present in the collection or place where
new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
{
    // Classic lower-bound search over the index range [lo, hi).
    size_t lo = 0;
    size_t hi = (size_t)(end - beg);
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(cmp(*(beg + mid), key))
        {
            lo = mid + 1;
        }
        else
        {
            hi = mid;
        }
    }
    return beg + lo;
}
3720 
3721 /*
3722 Returns true if all pointers in the array are not-null and unique.
3723 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
3724 T must be pointer type, e.g. VmaAllocation, VmaPool.
3725 */
3726 template<typename T>
3727 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
3728 {
3729  for(uint32_t i = 0; i < count; ++i)
3730  {
3731  const T iPtr = arr[i];
3732  if(iPtr == VMA_NULL)
3733  {
3734  return false;
3735  }
3736  for(uint32_t j = i + 1; j < count; ++j)
3737  {
3738  if(iPtr == arr[j])
3739  {
3740  return false;
3741  }
3742  }
3743  }
3744  return true;
3745 }
3746 
3748 // Memory allocation
3749 
3750 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
3751 {
3752  if((pAllocationCallbacks != VMA_NULL) &&
3753  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
3754  {
3755  return (*pAllocationCallbacks->pfnAllocation)(
3756  pAllocationCallbacks->pUserData,
3757  size,
3758  alignment,
3759  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3760  }
3761  else
3762  {
3763  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
3764  }
3765 }
3766 
3767 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
3768 {
3769  if((pAllocationCallbacks != VMA_NULL) &&
3770  (pAllocationCallbacks->pfnFree != VMA_NULL))
3771  {
3772  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
3773  }
3774  else
3775  {
3776  VMA_SYSTEM_FREE(ptr);
3777  }
3778 }
3779 
// Allocates raw, uninitialized storage for a single object of type T.
// The caller is responsible for constructing the object in it (see vma_new).
template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}
3785 
// Allocates raw, uninitialized storage for an array of count objects of type T.
// NOTE(review): sizeof(T) * count is not checked for overflow - callers are
// presumably trusted to pass sane counts; confirm at call sites.
template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}
3791 
// Placement-new helpers: allocate storage through the VMA allocation
// callbacks, then construct the object(s) in it.
// Pair with vma_delete / vma_delete_array.
#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
3795 
3796 template<typename T>
3797 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
3798 {
3799  ptr->~T();
3800  VmaFree(pAllocationCallbacks, ptr);
3801 }
3802 
3803 template<typename T>
3804 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
3805 {
3806  if(ptr != VMA_NULL)
3807  {
3808  for(size_t i = count; i--; )
3809  {
3810  ptr[i].~T();
3811  }
3812  VmaFree(pAllocationCallbacks, ptr);
3813  }
3814 }
3815 
3816 // STL-compatible allocator.
3817 template<typename T>
3818 class VmaStlAllocator
3819 {
3820 public:
3821  const VkAllocationCallbacks* const m_pCallbacks;
3822  typedef T value_type;
3823 
3824  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
3825  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
3826 
3827  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
3828  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
3829 
3830  template<typename U>
3831  bool operator==(const VmaStlAllocator<U>& rhs) const
3832  {
3833  return m_pCallbacks == rhs.m_pCallbacks;
3834  }
3835  template<typename U>
3836  bool operator!=(const VmaStlAllocator<U>& rhs) const
3837  {
3838  return m_pCallbacks != rhs.m_pCallbacks;
3839  }
3840 
3841  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
3842 };
3843 
3844 #if VMA_USE_STL_VECTOR
3845 
3846 #define VmaVector std::vector
3847 
// Inserts item into vec at position index (std::vector flavor).
template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}
3853 
// Removes the element at position index from vec (std::vector flavor).
template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}
3859 
3860 #else // #if VMA_USE_STL_VECTOR
3861 
3862 /* Class with interface compatible with subset of std::vector.
3863 T must be POD because constructors and destructors are not called and memcpy is
3864 used for these objects. */
3865 template<typename T, typename AllocatorT>
3866 class VmaVector
3867 {
3868 public:
3869  typedef T value_type;
3870 
3871  VmaVector(const AllocatorT& allocator) :
3872  m_Allocator(allocator),
3873  m_pArray(VMA_NULL),
3874  m_Count(0),
3875  m_Capacity(0)
3876  {
3877  }
3878 
3879  VmaVector(size_t count, const AllocatorT& allocator) :
3880  m_Allocator(allocator),
3881  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
3882  m_Count(count),
3883  m_Capacity(count)
3884  {
3885  }
3886 
3887  VmaVector(const VmaVector<T, AllocatorT>& src) :
3888  m_Allocator(src.m_Allocator),
3889  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
3890  m_Count(src.m_Count),
3891  m_Capacity(src.m_Count)
3892  {
3893  if(m_Count != 0)
3894  {
3895  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
3896  }
3897  }
3898 
3899  ~VmaVector()
3900  {
3901  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3902  }
3903 
3904  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
3905  {
3906  if(&rhs != this)
3907  {
3908  resize(rhs.m_Count);
3909  if(m_Count != 0)
3910  {
3911  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
3912  }
3913  }
3914  return *this;
3915  }
3916 
3917  bool empty() const { return m_Count == 0; }
3918  size_t size() const { return m_Count; }
3919  T* data() { return m_pArray; }
3920  const T* data() const { return m_pArray; }
3921 
3922  T& operator[](size_t index)
3923  {
3924  VMA_HEAVY_ASSERT(index < m_Count);
3925  return m_pArray[index];
3926  }
3927  const T& operator[](size_t index) const
3928  {
3929  VMA_HEAVY_ASSERT(index < m_Count);
3930  return m_pArray[index];
3931  }
3932 
3933  T& front()
3934  {
3935  VMA_HEAVY_ASSERT(m_Count > 0);
3936  return m_pArray[0];
3937  }
3938  const T& front() const
3939  {
3940  VMA_HEAVY_ASSERT(m_Count > 0);
3941  return m_pArray[0];
3942  }
3943  T& back()
3944  {
3945  VMA_HEAVY_ASSERT(m_Count > 0);
3946  return m_pArray[m_Count - 1];
3947  }
3948  const T& back() const
3949  {
3950  VMA_HEAVY_ASSERT(m_Count > 0);
3951  return m_pArray[m_Count - 1];
3952  }
3953 
3954  void reserve(size_t newCapacity, bool freeMemory = false)
3955  {
3956  newCapacity = VMA_MAX(newCapacity, m_Count);
3957 
3958  if((newCapacity < m_Capacity) && !freeMemory)
3959  {
3960  newCapacity = m_Capacity;
3961  }
3962 
3963  if(newCapacity != m_Capacity)
3964  {
3965  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
3966  if(m_Count != 0)
3967  {
3968  memcpy(newArray, m_pArray, m_Count * sizeof(T));
3969  }
3970  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3971  m_Capacity = newCapacity;
3972  m_pArray = newArray;
3973  }
3974  }
3975 
3976  void resize(size_t newCount, bool freeMemory = false)
3977  {
3978  size_t newCapacity = m_Capacity;
3979  if(newCount > m_Capacity)
3980  {
3981  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
3982  }
3983  else if(freeMemory)
3984  {
3985  newCapacity = newCount;
3986  }
3987 
3988  if(newCapacity != m_Capacity)
3989  {
3990  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
3991  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
3992  if(elementsToCopy != 0)
3993  {
3994  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
3995  }
3996  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
3997  m_Capacity = newCapacity;
3998  m_pArray = newArray;
3999  }
4000 
4001  m_Count = newCount;
4002  }
4003 
4004  void clear(bool freeMemory = false)
4005  {
4006  resize(0, freeMemory);
4007  }
4008 
4009  void insert(size_t index, const T& src)
4010  {
4011  VMA_HEAVY_ASSERT(index <= m_Count);
4012  const size_t oldCount = size();
4013  resize(oldCount + 1);
4014  if(index < oldCount)
4015  {
4016  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
4017  }
4018  m_pArray[index] = src;
4019  }
4020 
4021  void remove(size_t index)
4022  {
4023  VMA_HEAVY_ASSERT(index < m_Count);
4024  const size_t oldCount = size();
4025  if(index < oldCount - 1)
4026  {
4027  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
4028  }
4029  resize(oldCount - 1);
4030  }
4031 
4032  void push_back(const T& src)
4033  {
4034  const size_t newIndex = size();
4035  resize(newIndex + 1);
4036  m_pArray[newIndex] = src;
4037  }
4038 
4039  void pop_back()
4040  {
4041  VMA_HEAVY_ASSERT(m_Count > 0);
4042  resize(size() - 1);
4043  }
4044 
4045  void push_front(const T& src)
4046  {
4047  insert(0, src);
4048  }
4049 
4050  void pop_front()
4051  {
4052  VMA_HEAVY_ASSERT(m_Count > 0);
4053  remove(0);
4054  }
4055 
4056  typedef T* iterator;
4057 
4058  iterator begin() { return m_pArray; }
4059  iterator end() { return m_pArray + m_Count; }
4060 
4061 private:
4062  AllocatorT m_Allocator;
4063  T* m_pArray;
4064  size_t m_Count;
4065  size_t m_Capacity;
4066 };
4067 
// Inserts item into vec at position index (VmaVector flavor).
template<typename T, typename allocatorT>
static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(index, item);
}
4073 
// Removes the element at position index from vec (VmaVector flavor).
template<typename T, typename allocatorT>
static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
{
    vec.remove(index);
}
4079 
4080 #endif // #if VMA_USE_STL_VECTOR
4081 
4082 template<typename CmpLess, typename VectorT>
4083 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
4084 {
4085  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
4086  vector.data(),
4087  vector.data() + vector.size(),
4088  value,
4089  CmpLess()) - vector.data();
4090  VmaVectorInsert(vector, indexToInsert, value);
4091  return indexToInsert;
4092 }
4093 
4094 template<typename CmpLess, typename VectorT>
4095 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
4096 {
4097  CmpLess comparator;
4098  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
4099  vector.begin(),
4100  vector.end(),
4101  value,
4102  comparator);
4103  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
4104  {
4105  size_t indexToRemove = it - vector.begin();
4106  VmaVectorRemove(vector, indexToRemove);
4107  return true;
4108  }
4109  return false;
4110 }
4111 
4112 template<typename CmpLess, typename IterT, typename KeyT>
4113 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
4114 {
4115  CmpLess comparator;
4116  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4117  beg, end, value, comparator);
4118  if(it == end ||
4119  (!comparator(*it, value) && !comparator(value, *it)))
4120  {
4121  return it;
4122  }
4123  return end;
4124 }
4125 
4127 // class VmaPoolAllocator
4128 
/*
Allocator for objects of type T using a list of arrays (pools) to speed up
allocation. Number of elements that can be allocated is not bounded because
allocator can create multiple blocks.
*/
template<typename T>
class VmaPoolAllocator
{
    VMA_CLASS_NO_COPY(VmaPoolAllocator)
public:
    VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock);
    ~VmaPoolAllocator();
    // Frees all blocks; any pointers still outstanding from Alloc() become invalid.
    void Clear();
    T* Alloc();
    void Free(T* ptr);

private:
    // Each slot either holds a live T, or - while free - the index of the
    // next free slot in the same block (an intrusive free list).
    union Item
    {
        uint32_t NextFreeIndex;
        T Value;
    };

    struct ItemBlock
    {
        Item* pItems;            // Array of m_ItemsPerBlock items.
        uint32_t FirstFreeIndex; // Head of the free list; UINT32_MAX when block is full.
    };

    const VkAllocationCallbacks* m_pAllocationCallbacks;
    size_t m_ItemsPerBlock;
    VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;

    ItemBlock& CreateNewBlock();
};
4164 
// Stores the allocation callbacks and block size. No memory is allocated
// here; the first block is created lazily by Alloc().
template<typename T>
VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, size_t itemsPerBlock) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemsPerBlock(itemsPerBlock),
    m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
{
    VMA_ASSERT(itemsPerBlock > 0);
}
4173 
// Releases all item blocks.
template<typename T>
VmaPoolAllocator<T>::~VmaPoolAllocator()
{
    Clear();
}
4179 
4180 template<typename T>
4181 void VmaPoolAllocator<T>::Clear()
4182 {
4183  for(size_t i = m_ItemBlocks.size(); i--; )
4184  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemsPerBlock);
4185  m_ItemBlocks.clear();
4186 }
4187 
4188 template<typename T>
4189 T* VmaPoolAllocator<T>::Alloc()
4190 {
4191  for(size_t i = m_ItemBlocks.size(); i--; )
4192  {
4193  ItemBlock& block = m_ItemBlocks[i];
4194  // This block has some free items: Use first one.
4195  if(block.FirstFreeIndex != UINT32_MAX)
4196  {
4197  Item* const pItem = &block.pItems[block.FirstFreeIndex];
4198  block.FirstFreeIndex = pItem->NextFreeIndex;
4199  return &pItem->Value;
4200  }
4201  }
4202 
4203  // No block has free item: Create new one and use it.
4204  ItemBlock& newBlock = CreateNewBlock();
4205  Item* const pItem = &newBlock.pItems[0];
4206  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
4207  return &pItem->Value;
4208 }
4209 
// Returns an item previously obtained from Alloc() to its owning block,
// pushing its slot onto that block's free list. Asserts if ptr does not
// belong to any block of this allocator.
template<typename T>
void VmaPoolAllocator<T>::Free(T* ptr)
{
    // Search all memory blocks to find ptr.
    for(size_t i = 0; i < m_ItemBlocks.size(); ++i)
    {
        ItemBlock& block = m_ItemBlocks[i];

        // Casting to union. memcpy converts the T* to an Item* without a
        // formal pointer cast.
        Item* pItemPtr;
        memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));

        // Check if pItemPtr is in address range of this block.
        if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + m_ItemsPerBlock))
        {
            const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
            // Push the slot onto this block's free list.
            pItemPtr->NextFreeIndex = block.FirstFreeIndex;
            block.FirstFreeIndex = index;
            return;
        }
    }
    VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
}
4233 
4234 template<typename T>
4235 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
4236 {
4237  ItemBlock newBlock = {
4238  vma_new_array(m_pAllocationCallbacks, Item, m_ItemsPerBlock), 0 };
4239 
4240  m_ItemBlocks.push_back(newBlock);
4241 
4242  // Setup singly-linked list of all free items in this block.
4243  for(uint32_t i = 0; i < m_ItemsPerBlock - 1; ++i)
4244  newBlock.pItems[i].NextFreeIndex = i + 1;
4245  newBlock.pItems[m_ItemsPerBlock - 1].NextFreeIndex = UINT32_MAX;
4246  return m_ItemBlocks.back();
4247 }
4248 
4250 // class VmaRawList, VmaList
4251 
4252 #if VMA_USE_STL_LIST
4253 
4254 #define VmaList std::list
4255 
4256 #else // #if VMA_USE_STL_LIST
4257 
// Node of VmaRawList: intrusive prev/next links plus the stored value.
template<typename T>
struct VmaListItem
{
    VmaListItem* pPrev;
    VmaListItem* pNext;
    T Value;
};
4265 
// Doubly linked list. Nodes are allocated from an internal VmaPoolAllocator.
template<typename T>
class VmaRawList
{
    VMA_CLASS_NO_COPY(VmaRawList)
public:
    typedef VmaListItem<T> ItemType;

    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    ~VmaRawList();
    // Returns all items to the item allocator and empties the list.
    void Clear();

    size_t GetCount() const { return m_Count; }
    bool IsEmpty() const { return m_Count == 0; }

    ItemType* Front() { return m_pFront; }
    const ItemType* Front() const { return m_pFront; }
    ItemType* Back() { return m_pBack; }
    const ItemType* Back() const { return m_pBack; }

    // Append/prepend a new item; its Value is left uninitialized.
    ItemType* PushBack();
    ItemType* PushFront();
    // Append/prepend a new item holding a copy of value.
    ItemType* PushBack(const T& value);
    ItemType* PushFront(const T& value);
    void PopBack();
    void PopFront();

    // Item can be null - it means PushBack.
    ItemType* InsertBefore(ItemType* pItem);
    // Item can be null - it means PushFront.
    ItemType* InsertAfter(ItemType* pItem);

    ItemType* InsertBefore(ItemType* pItem, const T& value);
    ItemType* InsertAfter(ItemType* pItem, const T& value);

    void Remove(ItemType* pItem);

private:
    const VkAllocationCallbacks* const m_pAllocationCallbacks;
    VmaPoolAllocator<ItemType> m_ItemAllocator; // Pool for list nodes (128 per block, see ctor).
    ItemType* m_pFront; // First item, or VMA_NULL when empty.
    ItemType* m_pBack;  // Last item, or VMA_NULL when empty.
    size_t m_Count;
};
4310 
// Creates an empty list; list nodes are pooled in blocks of 128.
template<typename T>
VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    m_pAllocationCallbacks(pAllocationCallbacks),
    m_ItemAllocator(pAllocationCallbacks, 128),
    m_pFront(VMA_NULL),
    m_pBack(VMA_NULL),
    m_Count(0)
{
}
4320 
template<typename T>
VmaRawList<T>::~VmaRawList()
{
    // Intentionally not calling Clear, because that would be unnecessary
    // computations to return all items to m_ItemAllocator as free.
    // The item allocator's own destructor releases all blocks at once.
}
4327 
4328 template<typename T>
4329 void VmaRawList<T>::Clear()
4330 {
4331  if(IsEmpty() == false)
4332  {
4333  ItemType* pItem = m_pBack;
4334  while(pItem != VMA_NULL)
4335  {
4336  ItemType* const pPrevItem = pItem->pPrev;
4337  m_ItemAllocator.Free(pItem);
4338  pItem = pPrevItem;
4339  }
4340  m_pFront = VMA_NULL;
4341  m_pBack = VMA_NULL;
4342  m_Count = 0;
4343  }
4344 }
4345 
4346 template<typename T>
4347 VmaListItem<T>* VmaRawList<T>::PushBack()
4348 {
4349  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4350  pNewItem->pNext = VMA_NULL;
4351  if(IsEmpty())
4352  {
4353  pNewItem->pPrev = VMA_NULL;
4354  m_pFront = pNewItem;
4355  m_pBack = pNewItem;
4356  m_Count = 1;
4357  }
4358  else
4359  {
4360  pNewItem->pPrev = m_pBack;
4361  m_pBack->pNext = pNewItem;
4362  m_pBack = pNewItem;
4363  ++m_Count;
4364  }
4365  return pNewItem;
4366 }
4367 
4368 template<typename T>
4369 VmaListItem<T>* VmaRawList<T>::PushFront()
4370 {
4371  ItemType* const pNewItem = m_ItemAllocator.Alloc();
4372  pNewItem->pPrev = VMA_NULL;
4373  if(IsEmpty())
4374  {
4375  pNewItem->pNext = VMA_NULL;
4376  m_pFront = pNewItem;
4377  m_pBack = pNewItem;
4378  m_Count = 1;
4379  }
4380  else
4381  {
4382  pNewItem->pNext = m_pFront;
4383  m_pFront->pPrev = pNewItem;
4384  m_pFront = pNewItem;
4385  ++m_Count;
4386  }
4387  return pNewItem;
4388 }
4389 
4390 template<typename T>
4391 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
4392 {
4393  ItemType* const pNewItem = PushBack();
4394  pNewItem->Value = value;
4395  return pNewItem;
4396 }
4397 
4398 template<typename T>
4399 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
4400 {
4401  ItemType* const pNewItem = PushFront();
4402  pNewItem->Value = value;
4403  return pNewItem;
4404 }
4405 
4406 template<typename T>
4407 void VmaRawList<T>::PopBack()
4408 {
4409  VMA_HEAVY_ASSERT(m_Count > 0);
4410  ItemType* const pBackItem = m_pBack;
4411  ItemType* const pPrevItem = pBackItem->pPrev;
4412  if(pPrevItem != VMA_NULL)
4413  {
4414  pPrevItem->pNext = VMA_NULL;
4415  }
4416  m_pBack = pPrevItem;
4417  m_ItemAllocator.Free(pBackItem);
4418  --m_Count;
4419 }
4420 
4421 template<typename T>
4422 void VmaRawList<T>::PopFront()
4423 {
4424  VMA_HEAVY_ASSERT(m_Count > 0);
4425  ItemType* const pFrontItem = m_pFront;
4426  ItemType* const pNextItem = pFrontItem->pNext;
4427  if(pNextItem != VMA_NULL)
4428  {
4429  pNextItem->pPrev = VMA_NULL;
4430  }
4431  m_pFront = pNextItem;
4432  m_ItemAllocator.Free(pFrontItem);
4433  --m_Count;
4434 }
4435 
4436 template<typename T>
4437 void VmaRawList<T>::Remove(ItemType* pItem)
4438 {
4439  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
4440  VMA_HEAVY_ASSERT(m_Count > 0);
4441 
4442  if(pItem->pPrev != VMA_NULL)
4443  {
4444  pItem->pPrev->pNext = pItem->pNext;
4445  }
4446  else
4447  {
4448  VMA_HEAVY_ASSERT(m_pFront == pItem);
4449  m_pFront = pItem->pNext;
4450  }
4451 
4452  if(pItem->pNext != VMA_NULL)
4453  {
4454  pItem->pNext->pPrev = pItem->pPrev;
4455  }
4456  else
4457  {
4458  VMA_HEAVY_ASSERT(m_pBack == pItem);
4459  m_pBack = pItem->pPrev;
4460  }
4461 
4462  m_ItemAllocator.Free(pItem);
4463  --m_Count;
4464 }
4465 
4466 template<typename T>
4467 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
4468 {
4469  if(pItem != VMA_NULL)
4470  {
4471  ItemType* const prevItem = pItem->pPrev;
4472  ItemType* const newItem = m_ItemAllocator.Alloc();
4473  newItem->pPrev = prevItem;
4474  newItem->pNext = pItem;
4475  pItem->pPrev = newItem;
4476  if(prevItem != VMA_NULL)
4477  {
4478  prevItem->pNext = newItem;
4479  }
4480  else
4481  {
4482  VMA_HEAVY_ASSERT(m_pFront == pItem);
4483  m_pFront = newItem;
4484  }
4485  ++m_Count;
4486  return newItem;
4487  }
4488  else
4489  return PushBack();
4490 }
4491 
4492 template<typename T>
4493 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
4494 {
4495  if(pItem != VMA_NULL)
4496  {
4497  ItemType* const nextItem = pItem->pNext;
4498  ItemType* const newItem = m_ItemAllocator.Alloc();
4499  newItem->pNext = nextItem;
4500  newItem->pPrev = pItem;
4501  pItem->pNext = newItem;
4502  if(nextItem != VMA_NULL)
4503  {
4504  nextItem->pPrev = newItem;
4505  }
4506  else
4507  {
4508  VMA_HEAVY_ASSERT(m_pBack == pItem);
4509  m_pBack = newItem;
4510  }
4511  ++m_Count;
4512  return newItem;
4513  }
4514  else
4515  return PushFront();
4516 }
4517 
4518 template<typename T>
4519 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
4520 {
4521  ItemType* const newItem = InsertBefore(pItem);
4522  newItem->Value = value;
4523  return newItem;
4524 }
4525 
4526 template<typename T>
4527 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
4528 {
4529  ItemType* const newItem = InsertAfter(pItem);
4530  newItem->Value = value;
4531  return newItem;
4532 }
4533 
// Doubly linked list with an STL-like interface, built on top of VmaRawList.
// Allocation is routed through AllocatorT (a VmaStlAllocator).
template<typename T, typename AllocatorT>
class VmaList
{
    VMA_CLASS_NO_COPY(VmaList)
public:
    // Mutable iterator. A null m_pItem represents the end() position.
    class iterator
    {
    public:
        iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        // Decrementing end() moves to the last element.
        iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        iterator operator++(int)
        {
            iterator result = *this;
            ++*this;
            return result;
        }
        iterator operator--(int)
        {
            iterator result = *this;
            --*this;
            return result;
        }

        // Comparing iterators from different lists is a usage error.
        bool operator==(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        VmaRawList<T>* m_pList;
        VmaListItem<T>* m_pItem;

        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        friend class VmaList<T, AllocatorT>;
    };

    // Read-only iterator; implicitly convertible from iterator.
    class const_iterator
    {
    public:
        const_iterator() :
            m_pList(VMA_NULL),
            m_pItem(VMA_NULL)
        {
        }

        const_iterator(const iterator& src) :
            m_pList(src.m_pList),
            m_pItem(src.m_pItem)
        {
        }

        const T& operator*() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return m_pItem->Value;
        }
        const T* operator->() const
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            return &m_pItem->Value;
        }

        const_iterator& operator++()
        {
            VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
            m_pItem = m_pItem->pNext;
            return *this;
        }
        // Decrementing end() moves to the last element.
        const_iterator& operator--()
        {
            if(m_pItem != VMA_NULL)
            {
                m_pItem = m_pItem->pPrev;
            }
            else
            {
                VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
                m_pItem = m_pList->Back();
            }
            return *this;
        }

        const_iterator operator++(int)
        {
            const_iterator result = *this;
            ++*this;
            return result;
        }
        const_iterator operator--(int)
        {
            const_iterator result = *this;
            --*this;
            return result;
        }

        bool operator==(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem == rhs.m_pItem;
        }
        bool operator!=(const const_iterator& rhs) const
        {
            VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
            return m_pItem != rhs.m_pItem;
        }

    private:
        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
            m_pList(pList),
            m_pItem(pItem)
        {
        }

        const VmaRawList<T>* m_pList;
        const VmaListItem<T>* m_pItem;

        friend class VmaList<T, AllocatorT>;
    };

    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }

    bool empty() const { return m_RawList.IsEmpty(); }
    size_t size() const { return m_RawList.GetCount(); }

    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    iterator end() { return iterator(&m_RawList, VMA_NULL); }

    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }

    void clear() { m_RawList.Clear(); }
    void push_back(const T& value) { m_RawList.PushBack(value); }
    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    // Inserts value before position it; returns an iterator to the new element.
    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }

private:
    VmaRawList<T> m_RawList;
};
4718 
4719 #endif // #if VMA_USE_STL_LIST
4720 
4722 // class VmaMap
4723 
4724 // Unused in this version.
4725 #if 0
4726 
4727 #if VMA_USE_STL_UNORDERED_MAP
4728 
4729 #define VmaPair std::pair
4730 
4731 #define VMA_MAP_TYPE(KeyT, ValueT) \
4732  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
4733 
4734 #else // #if VMA_USE_STL_UNORDERED_MAP
4735 
// Minimal stand-in for std::pair, used by VmaMap. (Currently compiled out.)
template<typename T1, typename T2>
struct VmaPair
{
    T1 first;
    T2 second;

    VmaPair() : first(), second() { }
    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
};
4745 
/* Class compatible with subset of interface of std::unordered_map.
KeyT, ValueT must be POD because they will be stored in VmaVector.
Internally implemented as a vector of pairs kept sorted by key (see insert),
so find() is O(log n) while insert()/erase() are O(n).
*/
template<typename KeyT, typename ValueT>
class VmaMap
{
public:
    typedef VmaPair<KeyT, ValueT> PairType;
    typedef PairType* iterator;

    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }

    iterator begin() { return m_Vector.begin(); }
    iterator end() { return m_Vector.end(); }

    void insert(const PairType& pair);
    iterator find(const KeyT& key);
    void erase(iterator it);

private:
    VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
};
4768 
4769 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
4770 
// Orders pairs by their first member. The second overload allows comparing
// a pair directly against a bare key during binary search.
template<typename FirstT, typename SecondT>
struct VmaPairFirstLess
{
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    {
        return lhs.first < rhs.first;
    }
    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    {
        return lhs.first < rhsFirst;
    }
};
4783 
// Inserts pair at the position that keeps m_Vector sorted by key.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
{
    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        pair,
        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    VmaVectorInsert(m_Vector, indexToInsert, pair);
}
4794 
// Binary-searches for key; returns iterator to the matching pair, or end().
template<typename KeyT, typename ValueT>
VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
{
    PairType* it = VmaBinaryFindFirstNotLess(
        m_Vector.data(),
        m_Vector.data() + m_Vector.size(),
        key,
        VmaPairFirstLess<KeyT, ValueT>());
    if((it != m_Vector.end()) && (it->first == key))
    {
        return it;
    }
    else
    {
        return m_Vector.end();
    }
}
4812 
// Removes the element at it, shifting subsequent elements down.
template<typename KeyT, typename ValueT>
void VmaMap<KeyT, ValueT>::erase(iterator it)
{
    VmaVectorRemove(m_Vector, it - m_Vector.begin());
}
4818 
4819 #endif // #if VMA_USE_STL_UNORDERED_MAP
4820 
4821 #endif // #if 0
4822 
4824 
class VmaDeviceMemoryBlock;

// Direction of a cache-control operation on mapped memory.
enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
4828 
4829 struct VmaAllocation_T
4830 {
4831  VMA_CLASS_NO_COPY(VmaAllocation_T)
4832 private:
4833  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
4834 
4835  enum FLAGS
4836  {
4837  FLAG_USER_DATA_STRING = 0x01,
4838  };
4839 
4840 public:
4841  enum ALLOCATION_TYPE
4842  {
4843  ALLOCATION_TYPE_NONE,
4844  ALLOCATION_TYPE_BLOCK,
4845  ALLOCATION_TYPE_DEDICATED,
4846  };
4847 
4848  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
4849  m_Alignment(1),
4850  m_Size(0),
4851  m_pUserData(VMA_NULL),
4852  m_LastUseFrameIndex(currentFrameIndex),
4853  m_Type((uint8_t)ALLOCATION_TYPE_NONE),
4854  m_SuballocationType((uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN),
4855  m_MapCount(0),
4856  m_Flags(userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0)
4857  {
4858 #if VMA_STATS_STRING_ENABLED
4859  m_CreationFrameIndex = currentFrameIndex;
4860  m_BufferImageUsage = 0;
4861 #endif
4862  }
4863 
4864  ~VmaAllocation_T()
4865  {
4866  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
4867 
4868  // Check if owned string was freed.
4869  VMA_ASSERT(m_pUserData == VMA_NULL);
4870  }
4871 
4872  void InitBlockAllocation(
4873  VmaPool hPool,
4874  VmaDeviceMemoryBlock* block,
4875  VkDeviceSize offset,
4876  VkDeviceSize alignment,
4877  VkDeviceSize size,
4878  VmaSuballocationType suballocationType,
4879  bool mapped,
4880  bool canBecomeLost)
4881  {
4882  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4883  VMA_ASSERT(block != VMA_NULL);
4884  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4885  m_Alignment = alignment;
4886  m_Size = size;
4887  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4888  m_SuballocationType = (uint8_t)suballocationType;
4889  m_BlockAllocation.m_hPool = hPool;
4890  m_BlockAllocation.m_Block = block;
4891  m_BlockAllocation.m_Offset = offset;
4892  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
4893  }
4894 
4895  void InitLost()
4896  {
4897  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4898  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
4899  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
4900  m_BlockAllocation.m_hPool = VK_NULL_HANDLE;
4901  m_BlockAllocation.m_Block = VMA_NULL;
4902  m_BlockAllocation.m_Offset = 0;
4903  m_BlockAllocation.m_CanBecomeLost = true;
4904  }
4905 
4906  void ChangeBlockAllocation(
4907  VmaAllocator hAllocator,
4908  VmaDeviceMemoryBlock* block,
4909  VkDeviceSize offset);
4910 
4911  void ChangeSize(VkDeviceSize newSize);
4912  void ChangeOffset(VkDeviceSize newOffset);
4913 
4914  // pMappedData not null means allocation is created with MAPPED flag.
4915  void InitDedicatedAllocation(
4916  uint32_t memoryTypeIndex,
4917  VkDeviceMemory hMemory,
4918  VmaSuballocationType suballocationType,
4919  void* pMappedData,
4920  VkDeviceSize size)
4921  {
4922  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
4923  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
4924  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
4925  m_Alignment = 0;
4926  m_Size = size;
4927  m_SuballocationType = (uint8_t)suballocationType;
4928  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
4929  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
4930  m_DedicatedAllocation.m_hMemory = hMemory;
4931  m_DedicatedAllocation.m_pMappedData = pMappedData;
4932  }
4933 
    // Basic accessors for the shared header fields.
    ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    VkDeviceSize GetAlignment() const { return m_Alignment; }
    VkDeviceSize GetSize() const { return m_Size; }
    // True when m_pUserData is an owned string (freed via FreeUserDataString)
    // rather than an opaque user pointer.
    bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    void* GetUserData() const { return m_pUserData; }
    void SetUserData(VmaAllocator hAllocator, void* pUserData);
    VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }

    // Valid only for block allocations (asserts otherwise).
    VmaDeviceMemoryBlock* GetBlock() const
    {
        VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
        return m_BlockAllocation.m_Block;
    }
    VkDeviceSize GetOffset() const;
    VkDeviceMemory GetMemory() const;
    uint32_t GetMemoryTypeIndex() const;
    // True when created with the MAPPED flag (persistent mapping bit set).
    bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    void* GetMappedData() const;
    bool CanBecomeLost() const;
    VmaPool GetPool() const;
4954 
    // Atomically reads the frame index in which this allocation was last used.
    uint32_t GetLastUseFrameIndex() const
    {
        return m_LastUseFrameIndex.load();
    }
    // Atomic CAS on the last-use frame index. Uses compare_exchange_weak,
    // which may fail spuriously - callers are expected to retry in a loop.
    // On failure, 'expected' is updated with the current value.
    bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    {
        return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    }
    /*
    - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
      makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    - Else, returns false.

    If hAllocation is already lost, assert - you should not call it then.
    If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    */
    bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
4972 
4973  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
4974  {
4975  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
4976  outInfo.blockCount = 1;
4977  outInfo.allocationCount = 1;
4978  outInfo.unusedRangeCount = 0;
4979  outInfo.usedBytes = m_Size;
4980  outInfo.unusedBytes = 0;
4981  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
4982  outInfo.unusedRangeSizeMin = UINT64_MAX;
4983  outInfo.unusedRangeSizeMax = 0;
4984  }
4985 
    // Map/unmap helpers, dispatched by allocation type.
    void BlockAllocMap();
    void BlockAllocUnmap();
    VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    void DedicatedAllocUnmap(VmaAllocator hAllocator);

#if VMA_STATS_STRING_ENABLED
    uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }

    // Records VkBufferUsageFlags/VkImageUsageFlags for stats output.
    // May be set only once (asserts if already non-zero).
    void InitBufferImageUsage(uint32_t bufferImageUsage)
    {
        VMA_ASSERT(m_BufferImageUsage == 0);
        m_BufferImageUsage = bufferImageUsage;
    }

    void PrintParameters(class VmaJsonWriter& json) const;
#endif
5003 
private:
    VkDeviceSize m_Alignment;
    VkDeviceSize m_Size;
    // Opaque user pointer, or an owned string when FLAG_USER_DATA_STRING is set.
    void* m_pUserData;
    // Frame index of last use; VMA_FRAME_INDEX_LOST marks a lost allocation.
    VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    uint8_t m_Type; // ALLOCATION_TYPE
    uint8_t m_SuballocationType; // VmaSuballocationType
    // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    uint8_t m_MapCount;
    uint8_t m_Flags; // enum FLAGS

    // Allocation out of VmaDeviceMemoryBlock.
    struct BlockAllocation
    {
        VmaPool m_hPool; // Null if belongs to general memory.
        VmaDeviceMemoryBlock* m_Block;
        VkDeviceSize m_Offset;
        bool m_CanBecomeLost;
    };

    // Allocation for an object that has its own private VkDeviceMemory.
    struct DedicatedAllocation
    {
        uint32_t m_MemoryTypeIndex;
        VkDeviceMemory m_hMemory;
        void* m_pMappedData; // Not null means memory is mapped.
    };

    // Which member is active is determined by m_Type
    // (ALLOCATION_TYPE_BLOCK vs ALLOCATION_TYPE_DEDICATED).
    union
    {
        // Allocation out of VmaDeviceMemoryBlock.
        BlockAllocation m_BlockAllocation;
        // Allocation for an object that has its own private VkDeviceMemory.
        DedicatedAllocation m_DedicatedAllocation;
    };

#if VMA_STATS_STRING_ENABLED
    uint32_t m_CreationFrameIndex;
    uint32_t m_BufferImageUsage; // 0 if unknown.
#endif

    // Frees the owned string in m_pUserData (FLAG_USER_DATA_STRING case).
    void FreeUserDataString(VmaAllocator hAllocator);
};
5048 
5049 /*
5050 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
5051 allocated memory block or free.
5052 */
struct VmaSuballocation
{
    VkDeviceSize offset;
    VkDeviceSize size;
    // Null for a FREE suballocation; otherwise the allocation occupying this range.
    VmaAllocation hAllocation;
    VmaSuballocationType type;
};
5060 
// Comparators ordering suballocations by offset, for use with sorted
// containers / binary search (ascending and descending respectively).
struct VmaSuballocationOffsetLess
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset < rhs.offset;
    }
};
struct VmaSuballocationOffsetGreater
{
    bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    {
        return lhs.offset > rhs.offset;
    }
};
5076 
// Doubly-linked list of suballocations covering a whole block, in offset order.
typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;

// Cost of one additional allocation lost, as equivalent in bytes.
// Used by CalcCost() to weigh making allocations lost against wasted space.
static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
5081 
/*
Parameters of planned allocation inside a VmaDeviceMemoryBlock.

If canMakeOtherLost was false:
- item points to a FREE suballocation.
- itemsToMakeLostCount is 0.

If canMakeOtherLost was true:
- item points to first of sequence of suballocations, which are either FREE,
  or point to VmaAllocations that can become lost.
- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
  the requested allocation to succeed.
*/
struct VmaAllocationRequest
{
    VkDeviceSize offset;
    VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    VmaSuballocationList::iterator item;
    size_t itemsToMakeLostCount;
    // Metadata-implementation-specific payload carried between
    // CreateAllocationRequest() and Alloc().
    void* customData;

    // Total cost of satisfying this request: bytes sacrificed plus a fixed
    // per-lost-allocation penalty (VMA_LOST_ALLOCATION_COST). Lower is better.
    VkDeviceSize CalcCost() const
    {
        return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    }
};
5109 
/*
Data structure used for bookkeeping of allocations and unused ranges of memory
in a single VkDeviceMemory block.

Abstract base class: concrete layouts are VmaBlockMetadata_Generic,
VmaBlockMetadata_Linear and VmaBlockMetadata_Buddy below.
*/
class VmaBlockMetadata
{
public:
    VmaBlockMetadata(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata() { }
    virtual void Init(VkDeviceSize size) { m_Size = size; }

    // Validates all data structures inside this object. If not valid, returns false.
    virtual bool Validate() const = 0;
    VkDeviceSize GetSize() const { return m_Size; }
    virtual size_t GetAllocationCount() const = 0;
    virtual VkDeviceSize GetSumFreeSize() const = 0;
    virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    // Returns true if this block is empty - contains only single free suballocation.
    virtual bool IsEmpty() const = 0;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    // Shouldn't modify blockCount.
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
#endif

    // Tries to find a place for suballocation with given parameters inside this block.
    // If succeeded, fills pAllocationRequest and returns true.
    // If failed, returns false.
    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost the allocations listed in pAllocationRequest, so the request
    // can then be passed to Alloc(). Returns false on failure.
    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest) = 0;

    // Makes lost every allocation in this block that can be made lost given
    // currentFrameIndex/frameInUseCount. Returns the number made lost.
    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;

    // Checks margin magic values around allocations in the mapped block data.
    virtual VkResult CheckCorruption(const void* pBlockData) = 0;

    // Makes actual allocation based on request. Request must already be checked and valid.
    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation) = 0;

    // Frees suballocation assigned to given memory region.
    virtual void Free(const VmaAllocation allocation) = 0;
    virtual void FreeAtOffset(VkDeviceSize offset) = 0;

    // Tries to resize (grow or shrink) space for given allocation, in place.
    // Default implementation: not supported.
    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }

protected:
    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }

#if VMA_STATS_STRING_ENABLED
    // Shared helpers for PrintDetailedMap() implementations in derived classes.
    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
        VkDeviceSize unusedBytes,
        size_t allocationCount,
        size_t unusedRangeCount) const;
    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VmaAllocation hAllocation) const;
    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
        VkDeviceSize offset,
        VkDeviceSize size) const;
    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
#endif

private:
    VkDeviceSize m_Size;
    const VkAllocationCallbacks* m_pAllocationCallbacks;
};
5199 
// Helper for Validate() implementations: asserts (in debug builds) and makes
// the enclosing function return false when cond does not hold.
#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)
5204 
// General-purpose block metadata: keeps all suballocations (used and free)
// in an offset-ordered list, plus a size-sorted vector of large free ranges
// for best-fit style searches.
class VmaBlockMetadata_Generic : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
public:
    VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Generic();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const;

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

    virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);

    ////////////////////////////////////////////////////////////////////////////////
    // For defragmentation

    bool IsBufferImageGranularityConflictPossible(
        VkDeviceSize bufferImageGranularity,
        VmaSuballocationType& inOutPrevSuballocType) const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;
    friend class VmaDefragmentationAlgorithm_Fast;

    uint32_t m_FreeCount;
    VkDeviceSize m_SumFreeSize;
    // All suballocations (used and free), sorted by offset, ascending.
    VmaSuballocationList m_Suballocations;
    // Suballocations that are free and have size greater than certain threshold.
    // Sorted by size, ascending.
    VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;

    bool ValidateFreeSuballocationList() const;

    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
    // If yes, fills pOffset and returns true. If no, returns false.
    bool CheckAllocation(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        VmaSuballocationType allocType,
        VmaSuballocationList::const_iterator suballocItem,
        bool canMakeOtherLost,
        VkDeviceSize* pOffset,
        size_t* itemsToMakeLostCount,
        VkDeviceSize* pSumFreeSize,
        VkDeviceSize* pSumItemSize) const;
    // Given free suballocation, it merges it with following one, which must also be free.
    void MergeFreeWithNext(VmaSuballocationList::iterator item);
    // Releases given suballocation, making it free.
    // Merges it with adjacent free suballocations if applicable.
    // Returns iterator to new free suballocation at this place.
    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    // Given free suballocation, it inserts it into sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    // Given free suballocation, it removes it from sorted list of
    // m_FreeSuballocationsBySize if it's suitable.
    void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
};
5307 
5308 /*
5309 Allocations and their references in internal data structure look like this:
5310 
5311 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
5312 
5313  0 +-------+
5314  | |
5315  | |
5316  | |
5317  +-------+
5318  | Alloc | 1st[m_1stNullItemsBeginCount]
5319  +-------+
5320  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5321  +-------+
5322  | ... |
5323  +-------+
5324  | Alloc | 1st[1st.size() - 1]
5325  +-------+
5326  | |
5327  | |
5328  | |
5329 GetSize() +-------+
5330 
5331 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
5332 
5333  0 +-------+
5334  | Alloc | 2nd[0]
5335  +-------+
5336  | Alloc | 2nd[1]
5337  +-------+
5338  | ... |
5339  +-------+
5340  | Alloc | 2nd[2nd.size() - 1]
5341  +-------+
5342  | |
5343  | |
5344  | |
5345  +-------+
5346  | Alloc | 1st[m_1stNullItemsBeginCount]
5347  +-------+
5348  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5349  +-------+
5350  | ... |
5351  +-------+
5352  | Alloc | 1st[1st.size() - 1]
5353  +-------+
5354  | |
5355 GetSize() +-------+
5356 
5357 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
5358 
5359  0 +-------+
5360  | |
5361  | |
5362  | |
5363  +-------+
5364  | Alloc | 1st[m_1stNullItemsBeginCount]
5365  +-------+
5366  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
5367  +-------+
5368  | ... |
5369  +-------+
5370  | Alloc | 1st[1st.size() - 1]
5371  +-------+
5372  | |
5373  | |
5374  | |
5375  +-------+
5376  | Alloc | 2nd[2nd.size() - 1]
5377  +-------+
5378  | ... |
5379  +-------+
5380  | Alloc | 2nd[1]
5381  +-------+
5382  | Alloc | 2nd[0]
5383 GetSize() +-------+
5384 
5385 */
// Linear (ring-buffer / stack / double-stack) block metadata.
// See the large diagram comment above for the meaning of the 1st/2nd vectors
// and SECOND_VECTOR_MODE.
class VmaBlockMetadata_Linear : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
public:
    VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Linear();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const;
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return GetAllocationCount() == 0; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    virtual VkResult CheckCorruption(const void* pBlockData);

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation);
    virtual void FreeAtOffset(VkDeviceSize offset);

private:
    /*
    There are two suballocation vectors, used in ping-pong way.
    The one with index m_1stVectorIndex is called 1st.
    The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    2nd can be non-empty only when 1st is not empty.
    When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    */
    typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;

    enum SECOND_VECTOR_MODE
    {
        SECOND_VECTOR_EMPTY,
        /*
        Suballocations in 2nd vector are created later than the ones in 1st, but they
        all have smaller offset.
        */
        SECOND_VECTOR_RING_BUFFER,
        /*
        Suballocations in 2nd vector are upper side of double stack.
        They all have offsets higher than those in 1st vector.
        Top of this stack means smaller offsets, but higher indices in this vector.
        */
        SECOND_VECTOR_DOUBLE_STACK,
    };

    VkDeviceSize m_SumFreeSize;
    SuballocationVectorType m_Suballocations0, m_Suballocations1;
    uint32_t m_1stVectorIndex;
    SECOND_VECTOR_MODE m_2ndVectorMode;

    // Ping-pong accessors: which physical vector is "1st"/"2nd" depends on
    // m_1stVectorIndex and may swap during CleanupAfterFree().
    SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }

    // Number of items in 1st vector with hAllocation = null at the beginning.
    size_t m_1stNullItemsBeginCount;
    // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    size_t m_1stNullItemsMiddleCount;
    // Number of items in 2nd vector with hAllocation = null.
    size_t m_2ndNullItemsCount;

    bool ShouldCompact1st() const;
    void CleanupAfterFree();
};
5484 
5485 /*
5486 - GetSize() is the original size of allocated memory block.
5487 - m_UsableSize is this size aligned down to a power of two.
5488  All allocations and calculations happen relative to m_UsableSize.
5489 - GetUnusableSize() is the difference between them.
 It is reported as a separate, unused range, not available for allocations.
5491 
5492 Node at level 0 has size = m_UsableSize.
5493 Each next level contains nodes with size 2 times smaller than current level.
5494 m_LevelCount is the maximum number of levels to use in the current object.
5495 */
// Buddy-allocator block metadata: a binary tree of nodes whose sizes halve at
// each level, over m_UsableSize (block size rounded down to a power of two).
// See the comment above for the relation between GetSize(), m_UsableSize and
// GetUnusableSize().
class VmaBlockMetadata_Buddy : public VmaBlockMetadata
{
    VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
public:
    VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    virtual ~VmaBlockMetadata_Buddy();
    virtual void Init(VkDeviceSize size);

    virtual bool Validate() const;
    virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    // Unusable tail is reported as free so totals add up to GetSize().
    virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }

    virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    virtual void AddPoolStats(VmaPoolStats& inoutStats) const;

#if VMA_STATS_STRING_ENABLED
    virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
#endif

    virtual bool CreateAllocationRequest(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VkDeviceSize bufferImageGranularity,
        VkDeviceSize allocSize,
        VkDeviceSize allocAlignment,
        bool upperAddress,
        VmaSuballocationType allocType,
        bool canMakeOtherLost,
        uint32_t strategy,
        VmaAllocationRequest* pAllocationRequest);

    virtual bool MakeRequestedAllocationsLost(
        uint32_t currentFrameIndex,
        uint32_t frameInUseCount,
        VmaAllocationRequest* pAllocationRequest);

    virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);

    // Corruption detection is not supported by the buddy algorithm.
    virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }

    virtual void Alloc(
        const VmaAllocationRequest& request,
        VmaSuballocationType type,
        VkDeviceSize allocSize,
        bool upperAddress,
        VmaAllocation hAllocation);

    virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }

private:
    static const VkDeviceSize MIN_NODE_SIZE = 32;
    static const size_t MAX_LEVELS = 30;

    // Accumulators filled while walking the tree in Validate().
    struct ValidationContext
    {
        size_t calculatedAllocationCount;
        size_t calculatedFreeCount;
        VkDeviceSize calculatedSumFreeSize;

        ValidationContext() :
            calculatedAllocationCount(0),
            calculatedFreeCount(0),
            calculatedSumFreeSize(0) { }
    };

    struct Node
    {
        VkDeviceSize offset;
        enum TYPE
        {
            TYPE_FREE,
            TYPE_ALLOCATION,
            TYPE_SPLIT,
            TYPE_COUNT
        } type;
        Node* parent;
        Node* buddy;

        // Active member depends on type.
        union
        {
            struct
            {
                Node* prev;
                Node* next;
            } free; // TYPE_FREE: links in the per-level free list.
            struct
            {
                VmaAllocation alloc;
            } allocation; // TYPE_ALLOCATION.
            struct
            {
                Node* leftChild;
            } split; // TYPE_SPLIT: right child is leftChild->buddy.
        };
    };

    // Size of the memory block aligned down to a power of two.
    VkDeviceSize m_UsableSize;
    uint32_t m_LevelCount;

    Node* m_Root;
    // Per-level doubly-linked lists of free nodes.
    struct {
        Node* front;
        Node* back;
    } m_FreeList[MAX_LEVELS];
    // Number of nodes in the tree with type == TYPE_ALLOCATION.
    size_t m_AllocationCount;
    // Number of nodes in the tree with type == TYPE_FREE.
    size_t m_FreeCount;
    // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    VkDeviceSize m_SumFreeSize;

    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    void DeleteNode(Node* node);
    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    // Alloc passed just for validation. Can be null.
    void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    // Adds node to the front of FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next can be undefined.
    void AddToFreeListFront(uint32_t level, Node* node);
    // Removes node from FreeList at given level.
    // node->type must be FREE.
    // node->free.prev, next stay untouched.
    void RemoveFromFreeList(uint32_t level, Node* node);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
#endif
};
5632 
5633 /*
5634 Represents a single block of device memory (`VkDeviceMemory`) with all the
5635 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
5636 
5637 Thread-safety: This class must be externally synchronized.
5638 */
class VmaDeviceMemoryBlock
{
    VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
public:
    // Bookkeeping of suballocations inside this block. Concrete type depends
    // on the algorithm chosen in Init().
    VmaBlockMetadata* m_pMetadata;

    VmaDeviceMemoryBlock(VmaAllocator hAllocator);

    ~VmaDeviceMemoryBlock()
    {
        VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
        VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    }

    // Always call after construction.
    void Init(
        VmaAllocator hAllocator,
        uint32_t newMemoryTypeIndex,
        VkDeviceMemory newMemory,
        VkDeviceSize newSize,
        uint32_t id,
        uint32_t algorithm);
    // Always call before destruction.
    void Destroy(VmaAllocator allocator);

    VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    uint32_t GetId() const { return m_Id; }
    void* GetMappedData() const { return m_pMappedData; }

    // Validates all data structures inside this object. If not valid, returns false.
    bool Validate() const;

    VkResult CheckCorruption(VmaAllocator hAllocator);

    // Reference-counted mapping of the whole block's memory.
    // ppData can be null.
    VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    void Unmap(VmaAllocator hAllocator, uint32_t count);

    // Margin magic values used for corruption detection around a suballocation.
    VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);

    VkResult BindBufferMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkBuffer hBuffer);
    VkResult BindImageMemory(
        const VmaAllocator hAllocator,
        const VmaAllocation hAllocation,
        VkImage hImage);

private:
    uint32_t m_MemoryTypeIndex;
    uint32_t m_Id;
    VkDeviceMemory m_hMemory;

    /*
    Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    Also protects m_MapCount, m_pMappedData.
    Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    */
    VMA_MUTEX m_Mutex;
    uint32_t m_MapCount; // Map() reference counter.
    void* m_pMappedData;
};
5704 
// Comparator ordering raw pointers by address, for sorted containers.
// NOTE(review): relational comparison of pointers into unrelated objects is
// unspecified behavior in standard C++; this relies on a flat address space,
// which holds on all platforms targeted here. std::less<const void*> would be
// the strictly conforming alternative - consider it if <functional> is
// guaranteed to be included.
struct VmaPointerLess
{
    bool operator()(const void* lhs, const void* rhs) const
    {
        return lhs < rhs;
    }
};
5712 
// One planned copy of allocation data between blocks during defragmentation.
// Indices refer to blocks within a single VmaBlockVector.
struct VmaDefragmentationMove
{
    size_t srcBlockIndex;
    size_t dstBlockIndex;
    VkDeviceSize srcOffset;
    VkDeviceSize dstOffset;
    VkDeviceSize size; // Number of bytes to move.
};
5721 
5722 class VmaDefragmentationAlgorithm;
5723 
5724 /*
5725 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
5726 Vulkan memory type.
5727 
5728 Synchronized internally with a mutex.
5729 */
struct VmaBlockVector
{
    VMA_CLASS_NO_COPY(VmaBlockVector)
public:
    VmaBlockVector(
        VmaAllocator hAllocator,
        uint32_t memoryTypeIndex,
        VkDeviceSize preferredBlockSize,
        size_t minBlockCount,
        size_t maxBlockCount,
        VkDeviceSize bufferImageGranularity,
        uint32_t frameInUseCount,
        bool isCustomPool,
        bool explicitBlockSize,
        uint32_t algorithm);
    ~VmaBlockVector();

    // Pre-creates m_MinBlockCount empty blocks.
    VkResult CreateMinBlocks();

    uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    uint32_t GetAlgorithm() const { return m_Algorithm; }

    void GetPoolStats(VmaPoolStats* pStats);

    bool IsEmpty() const { return m_Blocks.empty(); }
    bool IsCorruptionDetectionEnabled() const;

    VkResult Allocate(
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    void Free(
        VmaAllocation hAllocation);

    // Adds statistics of this BlockVector to pStats.
    void AddStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    void MakePoolAllocationsLost(
        uint32_t currentFrameIndex,
        size_t* pLostAllocationCount);
    VkResult CheckCorruption();

    // Saves results in pCtx->res.
    void Defragment(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats,
        VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
        VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer);
    void DefragmentationEnd(
        class VmaBlockVectorDefragmentationContext* pCtx,
        VmaDefragmentationStats* pStats);

    ////////////////////////////////////////////////////////////////////////////////
    // To be used only while the m_Mutex is locked. Used during defragmentation.

    size_t GetBlockCount() const { return m_Blocks.size(); }
    VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    size_t CalcAllocationCount() const;
    bool IsBufferImageGranularityConflictPossible() const;

private:
    friend class VmaDefragmentationAlgorithm_Generic;

    const VmaAllocator m_hAllocator;
    const uint32_t m_MemoryTypeIndex;
    const VkDeviceSize m_PreferredBlockSize;
    const size_t m_MinBlockCount;
    const size_t m_MaxBlockCount;
    const VkDeviceSize m_BufferImageGranularity;
    const uint32_t m_FrameInUseCount;
    const bool m_IsCustomPool;
    const bool m_ExplicitBlockSize;
    const uint32_t m_Algorithm;
    /* There can be at most one allocation that is completely empty - a
    hysteresis to avoid pessimistic case of alternating creation and destruction
    of a VkDeviceMemory. */
    bool m_HasEmptyBlock;
    // Protects m_Blocks and all metadata changes inside the blocks.
    VMA_RW_MUTEX m_Mutex;
    // Incrementally sorted by sumFreeSize, ascending.
    VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    uint32_t m_NextBlockId;

    VkDeviceSize CalcMaxBlockSize() const;

    // Finds and removes given block from vector.
    void Remove(VmaDeviceMemoryBlock* pBlock);

    // Performs single step in sorting m_Blocks. They may not be fully sorted
    // after this call.
    void IncrementallySortBlocks();

    // To be used only without CAN_MAKE_OTHER_LOST flag.
    VkResult AllocateFromBlock(
        VmaDeviceMemoryBlock* pBlock,
        VmaPool hCurrentPool,
        uint32_t currentFrameIndex,
        VkDeviceSize size,
        VkDeviceSize alignment,
        VmaAllocationCreateFlags allocFlags,
        void* pUserData,
        VmaSuballocationType suballocType,
        uint32_t strategy,
        VmaAllocation* pAllocation);

    VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);

    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesCpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    // Saves result to pCtx->res.
    void ApplyDefragmentationMovesGpu(
        class VmaBlockVectorDefragmentationContext* pDefragCtx,
        const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkCommandBuffer commandBuffer);

    /*
    Used during defragmentation. pDefragmentationStats is optional. It's in/out
    - updated with new data.
    */
    void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
};
5865 
// Represents a single custom memory pool created with vmaCreatePool().
// Thin wrapper: all real work is delegated to the embedded VmaBlockVector.
struct VmaPool_T
{
    VMA_CLASS_NO_COPY(VmaPool_T)
public:
    // The VkDeviceMemory blocks owned by this pool.
    VmaBlockVector m_BlockVector;

    VmaPool_T(
        VmaAllocator hAllocator,
        const VmaPoolCreateInfo& createInfo,
        VkDeviceSize preferredBlockSize);
    ~VmaPool_T();

    // Identifier of this pool; 0 until assigned via SetId().
    uint32_t GetId() const { return m_Id; }
    // May be called only once - asserts that no id has been assigned yet.
    void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }

#if VMA_STATS_STRING_ENABLED
    //void PrintDetailedMap(class VmaStringBuilder& sb);
#endif

private:
    uint32_t m_Id;
};
5888 
5889 /*
5890 Performs defragmentation:
5891 
5892 - Updates `pBlockVector->m_pMetadata`.
5893 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
5894 - Does not move actual data, only returns requested moves as `moves`.
5895 */
// Abstract interface for a defragmentation strategy operating on a single
// VmaBlockVector. Concrete implementations below:
// VmaDefragmentationAlgorithm_Generic, VmaDefragmentationAlgorithm_Fast.
class VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
public:
    VmaDefragmentationAlgorithm(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex) :
        m_hAllocator(hAllocator),
        m_pBlockVector(pBlockVector),
        m_CurrentFrameIndex(currentFrameIndex)
    {
    }
    virtual ~VmaDefragmentationAlgorithm()
    {
    }

    // Registers a single allocation as a candidate for moving.
    // pChanged is optional; the algorithm may report through it whether the
    // allocation was actually moved.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    // Marks every allocation of the block vector as movable.
    virtual void AddAll() = 0;

    // Computes the moves to perform, appending them to `moves`, within the
    // given byte/allocation budget. Returns result code of the operation.
    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove) = 0;

    // Statistics accumulated by the last Defragment() call.
    virtual VkDeviceSize GetBytesMoved() const = 0;
    virtual uint32_t GetAllocationsMoved() const = 0;

protected:
    VmaAllocator const m_hAllocator;
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrentFrameIndex;

    // Pairs an allocation with the optional output flag passed to AddAllocation().
    struct AllocationInfo
    {
        VmaAllocation m_hAllocation;
        VkBool32* m_pChanged;

        AllocationInfo() :
            m_hAllocation(VK_NULL_HANDLE),
            m_pChanged(VMA_NULL)
        {
        }
        AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
            m_hAllocation(hAlloc),
            m_pChanged(pChanged)
        {
        }
    };
};
5946 
// General-purpose defragmentation strategy: tracks per-block allocation lists
// and repeatedly moves allocations into better-placed blocks (see
// DefragmentRound). Works allocation-by-allocation; slower but applicable in
// more situations than the _Fast variant.
class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
public:
    VmaDefragmentationAlgorithm_Generic(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Generic();

    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    // Orders AllocationInfo descending by allocation size.
    struct AllocationInfoSizeGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
        }
    };

    // Orders AllocationInfo descending by allocation offset.
    struct AllocationInfoOffsetGreater
    {
        bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
        {
            return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
        }
    };

    // Per-block bookkeeping: which allocations inside the block participate
    // in defragmentation, and whether any allocation there is non-movable.
    struct BlockInfo
    {
        size_t m_OriginalBlockIndex;
        VmaDeviceMemoryBlock* m_pBlock;
        bool m_HasNonMovableAllocations;
        VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;

        BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
            m_OriginalBlockIndex(SIZE_MAX),
            m_pBlock(VMA_NULL),
            // Pessimistic default until CalcHasNonMovableAllocations() runs.
            m_HasNonMovableAllocations(true),
            m_Allocations(pAllocationCallbacks)
        {
        }

        // A block has non-movable allocations when not every allocation in
        // its metadata was registered for defragmentation.
        void CalcHasNonMovableAllocations()
        {
            const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
            const size_t defragmentAllocCount = m_Allocations.size();
            m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
        }

        void SortAllocationsBySizeDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
        }

        void SortAllocationsByOffsetDescending()
        {
            VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
        }
    };

    // Heterogeneous comparator for binary search of BlockInfo* by block pointer.
    struct BlockPointerLess
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlock;
        }
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
        }
    };

    // 1. Blocks with some non-movable allocations go first.
    // 2. Blocks with smaller sumFreeSize go first.
    struct BlockInfoCompareMoveDestination
    {
        bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
        {
            if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return true;
            }
            if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
            {
                return false;
            }
            if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
            {
                return true;
            }
            return false;
        }
    };

    typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    BlockInfoVector m_Blocks;

    // One pass of the algorithm; called repeatedly by Defragment().
    VkResult DefragmentRound(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    size_t CalcBlocksWithNonMovableCount() const;

    // Heuristic filter: rejects moves that would not improve packing.
    static bool MoveMakesSense(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset);
};
6073 
// Faster defragmentation strategy: rewrites block metadata wholesale
// (PreprocessMetadata/PostprocessMetadata) and reuses discovered free gaps via
// a small fixed-size FreeSpaceDatabase instead of per-allocation search.
class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
{
    VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
public:
    VmaDefragmentationAlgorithm_Fast(
        VmaAllocator hAllocator,
        VmaBlockVector* pBlockVector,
        uint32_t currentFrameIndex,
        bool overlappingMoveSupported);
    virtual ~VmaDefragmentationAlgorithm_Fast();

    // This algorithm ignores individual registrations and only counts them;
    // it operates on whole blocks.
    virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    virtual void AddAll() { m_AllAllocations = true; }

    virtual VkResult Defragment(
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
        VkDeviceSize maxBytesToMove,
        uint32_t maxAllocationsToMove);

    virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }

private:
    struct BlockInfo
    {
        size_t origBlockIndex;
    };

    // Fixed-capacity registry of the most promising free gaps found so far.
    // Entries with blockInfoIndex == SIZE_MAX are unused slots.
    class FreeSpaceDatabase
    {
    public:
        FreeSpaceDatabase()
        {
            // Mark all slots invalid.
            FreeSpace s = {};
            s.blockInfoIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                m_FreeSpaces[i] = s;
            }
        }

        // Offers a free range for future reuse. Too-small ranges are ignored;
        // otherwise the range may replace the smallest recorded one.
        void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
        {
            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                return;
            }

            // Find first invalid or the smallest structure.
            size_t bestIndex = SIZE_MAX;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Empty structure.
                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
                {
                    bestIndex = i;
                    break;
                }
                if(m_FreeSpaces[i].size < size &&
                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
                {
                    bestIndex = i;
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
                m_FreeSpaces[bestIndex].offset = offset;
                m_FreeSpaces[bestIndex].size = size;
            }
        }

        // Tries to find a recorded gap that can hold `size` bytes at `alignment`.
        // On success returns true, outputs the destination, and shrinks or
        // invalidates the consumed slot.
        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
        {
            // Prefer the slot leaving the most free space after the allocation.
            size_t bestIndex = SIZE_MAX;
            VkDeviceSize bestFreeSpaceAfter = 0;
            for(size_t i = 0; i < MAX_COUNT; ++i)
            {
                // Structure is valid.
                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
                {
                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
                    // Allocation fits into this structure.
                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
                    {
                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
                            (dstOffset + size);
                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
                        {
                            bestIndex = i;
                            bestFreeSpaceAfter = freeSpaceAfter;
                        }
                    }
                }
            }

            if(bestIndex != SIZE_MAX)
            {
                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);

                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
                {
                    // Leave this structure for remaining empty space.
                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
                }
                else
                {
                    // This structure becomes invalid.
                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
                }

                return true;
            }

            return false;
        }

    private:
        static const size_t MAX_COUNT = 4;

        struct FreeSpace
        {
            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
            VkDeviceSize offset;
            VkDeviceSize size;
        } m_FreeSpaces[MAX_COUNT];
    };

    const bool m_OverlappingMoveSupported;

    uint32_t m_AllocationCount;
    bool m_AllAllocations;

    VkDeviceSize m_BytesMoved;
    uint32_t m_AllocationsMoved;

    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;

    void PreprocessMetadata();
    void PostprocessMetadata();
    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
};
6221 
// Per-block state used during a defragmentation pass: flags plus a staging
// buffer handle bound to the block (for the GPU copy path).
struct VmaBlockDefragmentationContext
{
private:
    VMA_CLASS_NO_COPY(VmaBlockDefragmentationContext)
public:
    enum BLOCK_FLAG
    {
        // Set when the block participates in the current defragmentation.
        BLOCK_FLAG_USED = 0x00000001,
    };
    uint32_t flags;
    VkBuffer hBuffer;

    VmaBlockDefragmentationContext() :
        flags(0),
        hBuffer(VK_NULL_HANDLE)
    {
    }
};
6240 
// Defragmentation state for one VmaBlockVector (either a default pool for a
// memory type or a custom pool). Owns the chosen algorithm object and collects
// allocations between construction and Begin().
class VmaBlockVectorDefragmentationContext
{
    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
public:
    // Result of the operation on this vector; written by VmaBlockVector::Defragment.
    VkResult res;
    // True while this context holds the block vector's m_Mutex.
    bool mutexLocked;
    // One entry per block of the vector.
    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;

    VmaBlockVectorDefragmentationContext(
        VmaAllocator hAllocator,
        VmaPool hCustomPool, // Optional.
        VmaBlockVector* pBlockVector,
        uint32_t currFrameIndex,
        uint32_t flags);
    ~VmaBlockVectorDefragmentationContext();

    VmaPool GetCustomPool() const { return m_hCustomPool; }
    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }

    // Queues an allocation before Begin() is called.
    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    void AddAll() { m_AllAllocations = true; }

    // Creates m_pAlgorithm and feeds it the queued allocations.
    void Begin(bool overlappingMoveSupported);

private:
    const VmaAllocator m_hAllocator;
    // Null if not from custom pool.
    const VmaPool m_hCustomPool;
    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    VmaBlockVector* const m_pBlockVector;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_AlgorithmFlags;
    // Owner of this object.
    VmaDefragmentationAlgorithm* m_pAlgorithm;

    struct AllocInfo
    {
        VmaAllocation hAlloc;
        VkBool32* pChanged;
    };
    // Used between constructor and Begin.
    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    bool m_AllAllocations;
};
6286 
// Top-level defragmentation context returned by vmaDefragmentationBegin().
// Aggregates one VmaBlockVectorDefragmentationContext per affected default
// pool (indexed by memory type) and per affected custom pool.
struct VmaDefragmentationContext_T
{
private:
    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
public:
    VmaDefragmentationContext_T(
        VmaAllocator hAllocator,
        uint32_t currFrameIndex,
        uint32_t flags,
        VmaDefragmentationStats* pStats);
    ~VmaDefragmentationContext_T();

    // Registers whole custom pools for defragmentation.
    void AddPools(uint32_t poolCount, VmaPool* pPools);
    // Registers individual allocations; pAllocationsChanged is an optional
    // parallel output array.
    void AddAllocations(
        uint32_t allocationCount,
        VmaAllocation* pAllocations,
        VkBool32* pAllocationsChanged);

    /*
    Returns:
    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    - Negative value if error occurred and object can be destroyed immediately.
    */
    VkResult Defragment(
        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);

private:
    const VmaAllocator m_hAllocator;
    const uint32_t m_CurrFrameIndex;
    const uint32_t m_Flags;
    VmaDefragmentationStats* const m_pStats;
    // Owner of these objects.
    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    // Owner of these objects.
    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
};
6326 
6327 #if VMA_RECORDING_ENABLED
6328 
6329 class VmaRecorder
6330 {
6331 public:
6332  VmaRecorder();
6333  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
6334  void WriteConfiguration(
6335  const VkPhysicalDeviceProperties& devProps,
6336  const VkPhysicalDeviceMemoryProperties& memProps,
6337  bool dedicatedAllocationExtensionEnabled);
6338  ~VmaRecorder();
6339 
6340  void RecordCreateAllocator(uint32_t frameIndex);
6341  void RecordDestroyAllocator(uint32_t frameIndex);
6342  void RecordCreatePool(uint32_t frameIndex,
6343  const VmaPoolCreateInfo& createInfo,
6344  VmaPool pool);
6345  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
6346  void RecordAllocateMemory(uint32_t frameIndex,
6347  const VkMemoryRequirements& vkMemReq,
6348  const VmaAllocationCreateInfo& createInfo,
6349  VmaAllocation allocation);
6350  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
6351  const VkMemoryRequirements& vkMemReq,
6352  bool requiresDedicatedAllocation,
6353  bool prefersDedicatedAllocation,
6354  const VmaAllocationCreateInfo& createInfo,
6355  VmaAllocation allocation);
6356  void RecordAllocateMemoryForImage(uint32_t frameIndex,
6357  const VkMemoryRequirements& vkMemReq,
6358  bool requiresDedicatedAllocation,
6359  bool prefersDedicatedAllocation,
6360  const VmaAllocationCreateInfo& createInfo,
6361  VmaAllocation allocation);
6362  void RecordFreeMemory(uint32_t frameIndex,
6363  VmaAllocation allocation);
6364  void RecordResizeAllocation(
6365  uint32_t frameIndex,
6366  VmaAllocation allocation,
6367  VkDeviceSize newSize);
6368  void RecordSetAllocationUserData(uint32_t frameIndex,
6369  VmaAllocation allocation,
6370  const void* pUserData);
6371  void RecordCreateLostAllocation(uint32_t frameIndex,
6372  VmaAllocation allocation);
6373  void RecordMapMemory(uint32_t frameIndex,
6374  VmaAllocation allocation);
6375  void RecordUnmapMemory(uint32_t frameIndex,
6376  VmaAllocation allocation);
6377  void RecordFlushAllocation(uint32_t frameIndex,
6378  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6379  void RecordInvalidateAllocation(uint32_t frameIndex,
6380  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
6381  void RecordCreateBuffer(uint32_t frameIndex,
6382  const VkBufferCreateInfo& bufCreateInfo,
6383  const VmaAllocationCreateInfo& allocCreateInfo,
6384  VmaAllocation allocation);
6385  void RecordCreateImage(uint32_t frameIndex,
6386  const VkImageCreateInfo& imageCreateInfo,
6387  const VmaAllocationCreateInfo& allocCreateInfo,
6388  VmaAllocation allocation);
6389  void RecordDestroyBuffer(uint32_t frameIndex,
6390  VmaAllocation allocation);
6391  void RecordDestroyImage(uint32_t frameIndex,
6392  VmaAllocation allocation);
6393  void RecordTouchAllocation(uint32_t frameIndex,
6394  VmaAllocation allocation);
6395  void RecordGetAllocationInfo(uint32_t frameIndex,
6396  VmaAllocation allocation);
6397  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
6398  VmaPool pool);
6399  void RecordDefragmentationBegin(uint32_t frameIndex,
6400  const VmaDefragmentationInfo2& info,
6402  void RecordDefragmentationEnd(uint32_t frameIndex,
6404 
6405 private:
6406  struct CallParams
6407  {
6408  uint32_t threadId;
6409  double time;
6410  };
6411 
6412  class UserDataString
6413  {
6414  public:
6415  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
6416  const char* GetString() const { return m_Str; }
6417 
6418  private:
6419  char m_PtrStr[17];
6420  const char* m_Str;
6421  };
6422 
6423  bool m_UseMutex;
6424  VmaRecordFlags m_Flags;
6425  FILE* m_File;
6426  VMA_MUTEX m_FileMutex;
6427  int64_t m_Freq;
6428  int64_t m_StartCounter;
6429 
6430  void GetBasicParams(CallParams& outParams);
6431 
6432  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
6433  template<typename T>
6434  void PrintPointerList(uint64_t count, const T* pItems)
6435  {
6436  if(count)
6437  {
6438  fprintf(m_File, "%p", pItems[0]);
6439  for(uint64_t i = 1; i < count; ++i)
6440  {
6441  fprintf(m_File, " %p", pItems[i]);
6442  }
6443  }
6444  }
6445 
6446  void Flush();
6447 };
6448 
6449 #endif // #if VMA_RECORDING_ENABLED
6450 
6451 // Main allocator object.
// Implementation of the VmaAllocator handle: owns default block vectors per
// memory type, dedicated-allocation registries, custom pools, and cached
// device/physical-device properties.
struct VmaAllocator_T
{
    VMA_CLASS_NO_COPY(VmaAllocator_T)
public:
    bool m_UseMutex;
    bool m_UseKhrDedicatedAllocation;
    VkDevice m_hDevice;
    // True when the user supplied VkAllocationCallbacks at creation.
    bool m_AllocationCallbacksSpecified;
    VkAllocationCallbacks m_AllocationCallbacks;
    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;

    // Number of bytes free out of limit, or VK_WHOLE_SIZE if no limit for that heap.
    VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    VMA_MUTEX m_HeapSizeLimitMutex;

    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    VkPhysicalDeviceMemoryProperties m_MemProps;

    // Default pools.
    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];

    // Each vector is sorted by memory (handle value).
    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];

    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    ~VmaAllocator_T();

    // Returns user callbacks if specified, null otherwise (Vulkan default).
    const VkAllocationCallbacks* GetAllocationCallbacks() const
    {
        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    }
    const VmaVulkanFunctions& GetVulkanFunctions() const
    {
        return m_VulkanFunctions;
    }

    // Device limit, possibly raised by the debug margin override.
    VkDeviceSize GetBufferImageGranularity() const
    {
        return VMA_MAX(
            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    }

    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }

    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    {
        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    }
    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    {
        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    }
    // Minimum alignment for all allocations in specific memory type.
    // Non-coherent types must additionally respect nonCoherentAtomSize.
    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    {
        return IsMemoryTypeNonCoherent(memTypeIndex) ?
            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    }

    bool IsIntegratedGpu() const
    {
        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    }

#if VMA_RECORDING_ENABLED
    VmaRecorder* GetRecorder() const { return m_pRecorder; }
#endif

    void GetBufferMemoryRequirements(
        VkBuffer hBuffer,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;
    void GetImageMemoryRequirements(
        VkImage hImage,
        VkMemoryRequirements& memReq,
        bool& requiresDedicatedAllocation,
        bool& prefersDedicatedAllocation) const;

    // Main allocation function.
    VkResult AllocateMemory(
        const VkMemoryRequirements& vkMemReq,
        bool requiresDedicatedAllocation,
        bool prefersDedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Main deallocation function.
    void FreeMemory(const VmaAllocation allocation);

    VkResult ResizeAllocation(
        const VmaAllocation alloc,
        VkDeviceSize newSize);

    void CalculateStats(VmaStats* pStats);

#if VMA_STATS_STRING_ENABLED
    void PrintDetailedMap(class VmaJsonWriter& json);
#endif

    VkResult DefragmentationBegin(
        const VmaDefragmentationInfo2& info,
        VmaDefragmentationStats* pStats,
        VmaDefragmentationContext* pContext);
    VkResult DefragmentationEnd(
        VmaDefragmentationContext context);

    void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    bool TouchAllocation(VmaAllocation hAllocation);

    VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    void DestroyPool(VmaPool pool);
    void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);

    void SetCurrentFrameIndex(uint32_t frameIndex);
    uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }

    void MakePoolAllocationsLost(
        VmaPool hPool,
        size_t* pLostAllocationCount);
    VkResult CheckPoolCorruption(VmaPool hPool);
    VkResult CheckCorruption(uint32_t memoryTypeBits);

    void CreateLostAllocation(VmaAllocation* pAllocation);

    // Wrappers over vkAllocateMemory/vkFreeMemory that also track heap limits.
    VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);

    VkResult Map(VmaAllocation hAllocation, void** ppData);
    void Unmap(VmaAllocation hAllocation);

    VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);

    void FlushOrInvalidateAllocation(
        VmaAllocation hAllocation,
        VkDeviceSize offset, VkDeviceSize size,
        VMA_CACHE_OPERATION op);

    // Debug helper: fills allocation memory with the given byte pattern.
    void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);

private:
    VkDeviceSize m_PreferredLargeHeapBlockSize;

    VkPhysicalDevice m_PhysicalDevice;
    VMA_ATOMIC_UINT32 m_CurrentFrameIndex;

    VMA_RW_MUTEX m_PoolsMutex;
    // Protected by m_PoolsMutex. Sorted by pointer value.
    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    uint32_t m_NextPoolId;

    VmaVulkanFunctions m_VulkanFunctions;

#if VMA_RECORDING_ENABLED
    VmaRecorder* m_pRecorder;
#endif

    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);

    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);

    VkResult AllocateMemoryOfType(
        VkDeviceSize size,
        VkDeviceSize alignment,
        bool dedicatedAllocation,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        const VmaAllocationCreateInfo& createInfo,
        uint32_t memTypeIndex,
        VmaSuballocationType suballocType,
        VmaAllocation* pAllocation);

    // Allocates and registers new VkDeviceMemory specifically for single allocation.
    VkResult AllocateDedicatedMemory(
        VkDeviceSize size,
        VmaSuballocationType suballocType,
        uint32_t memTypeIndex,
        bool map,
        bool isUserDataString,
        void* pUserData,
        VkBuffer dedicatedBuffer,
        VkImage dedicatedImage,
        VmaAllocation* pAllocation);

    // Tries to free pMemory as Dedicated Memory. Returns true if found and freed.
    void FreeDedicatedMemory(VmaAllocation allocation);
};
6652 
6654 // Memory allocation #2 after VmaAllocator_T definition
6655 
6656 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
6657 {
6658  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
6659 }
6660 
6661 static void VmaFree(VmaAllocator hAllocator, void* ptr)
6662 {
6663  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
6664 }
6665 
6666 template<typename T>
6667 static T* VmaAllocate(VmaAllocator hAllocator)
6668 {
6669  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
6670 }
6671 
6672 template<typename T>
6673 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
6674 {
6675  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
6676 }
6677 
6678 template<typename T>
6679 static void vma_delete(VmaAllocator hAllocator, T* ptr)
6680 {
6681  if(ptr != VMA_NULL)
6682  {
6683  ptr->~T();
6684  VmaFree(hAllocator, ptr);
6685  }
6686 }
6687 
6688 template<typename T>
6689 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
6690 {
6691  if(ptr != VMA_NULL)
6692  {
6693  for(size_t i = count; i--; )
6694  ptr[i].~T();
6695  VmaFree(hAllocator, ptr);
6696  }
6697 }
6698 
6700 // VmaStringBuilder
6701 
6702 #if VMA_STATS_STRING_ENABLED
6703 
// Incrementally builds a character buffer; used for statistics/JSON output.
class VmaStringBuilder
{
public:
    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    size_t GetLength() const { return m_Data.size(); }
    // Raw character data; no null terminator is appended - use GetLength().
    const char* GetData() const { return m_Data.data(); }

    void Add(char ch) { m_Data.push_back(ch); }
    void Add(const char* pStr);
    void AddNewLine() { Add('\n'); }
    void AddNumber(uint32_t num);
    void AddNumber(uint64_t num);
    void AddPointer(const void* ptr);

private:
    VmaVector< char, VmaStlAllocator<char> > m_Data;
};
6721 
6722 void VmaStringBuilder::Add(const char* pStr)
6723 {
6724  const size_t strLen = strlen(pStr);
6725  if(strLen > 0)
6726  {
6727  const size_t oldCount = m_Data.size();
6728  m_Data.resize(oldCount + strLen);
6729  memcpy(m_Data.data() + oldCount, pStr, strLen);
6730  }
6731 }
6732 
6733 void VmaStringBuilder::AddNumber(uint32_t num)
6734 {
6735  char buf[11];
6736  VmaUint32ToStr(buf, sizeof(buf), num);
6737  Add(buf);
6738 }
6739 
6740 void VmaStringBuilder::AddNumber(uint64_t num)
6741 {
6742  char buf[21];
6743  VmaUint64ToStr(buf, sizeof(buf), num);
6744  Add(buf);
6745 }
6746 
6747 void VmaStringBuilder::AddPointer(const void* ptr)
6748 {
6749  char buf[21];
6750  VmaPtrToStr(buf, sizeof(buf), ptr);
6751  Add(buf);
6752 }
6753 
6754 #endif // #if VMA_STATS_STRING_ENABLED
6755 
6757 // VmaJsonWriter
6758 
6759 #if VMA_STATS_STRING_ENABLED
6760 
// Streaming JSON writer over a VmaStringBuilder. Maintains a stack of open
// objects/arrays and asserts correct nesting; strings may be built piecewise
// via BeginString/ContinueString/EndString.
class VmaJsonWriter
{
    VMA_CLASS_NO_COPY(VmaJsonWriter)
public:
    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    ~VmaJsonWriter();

    // singleLine suppresses indentation/newlines inside the collection.
    void BeginObject(bool singleLine = false);
    void EndObject();

    void BeginArray(bool singleLine = false);
    void EndArray();

    // Writes a complete quoted string value.
    void WriteString(const char* pStr);
    // Piecewise string building; must be balanced with EndString().
    void BeginString(const char* pStr = VMA_NULL);
    void ContinueString(const char* pStr);
    void ContinueString(uint32_t n);
    void ContinueString(uint64_t n);
    void ContinueString_Pointer(const void* ptr);
    void EndString(const char* pStr = VMA_NULL);

    void WriteNumber(uint32_t n);
    void WriteNumber(uint64_t n);
    void WriteBool(bool b);
    void WriteNull();

private:
    // String appended once per nesting level by WriteIndent().
    static const char* const INDENT;

    enum COLLECTION_TYPE
    {
        COLLECTION_TYPE_OBJECT,
        COLLECTION_TYPE_ARRAY,
    };
    // One entry per currently open object/array.
    struct StackItem
    {
        COLLECTION_TYPE type;
        uint32_t valueCount;
        bool singleLineMode;
    };

    VmaStringBuilder& m_SB;
    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    bool m_InsideString;

    void BeginValue(bool isString);
    void WriteIndent(bool oneLess = false);
};
6809 
// Indentation unit emitted once per nesting level by WriteIndent().
const char* const VmaJsonWriter::INDENT = " ";
6811 
// Wraps an existing string builder; pAllocationCallbacks is used only for the
// internal nesting stack.
VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    m_SB(sb),
    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    m_InsideString(false)
{
}
6818 
VmaJsonWriter::~VmaJsonWriter()
{
    // A correctly used writer has ended every string and closed every
    // object/array before destruction.
    VMA_ASSERT(!m_InsideString);
    VMA_ASSERT(m_Stack.empty());
}
6824 
6825 void VmaJsonWriter::BeginObject(bool singleLine)
6826 {
6827  VMA_ASSERT(!m_InsideString);
6828 
6829  BeginValue(false);
6830  m_SB.Add('{');
6831 
6832  StackItem item;
6833  item.type = COLLECTION_TYPE_OBJECT;
6834  item.valueCount = 0;
6835  item.singleLineMode = singleLine;
6836  m_Stack.push_back(item);
6837 }
6838 
6839 void VmaJsonWriter::EndObject()
6840 {
6841  VMA_ASSERT(!m_InsideString);
6842 
6843  WriteIndent(true);
6844  m_SB.Add('}');
6845 
6846  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
6847  m_Stack.pop_back();
6848 }
6849 
6850 void VmaJsonWriter::BeginArray(bool singleLine)
6851 {
6852  VMA_ASSERT(!m_InsideString);
6853 
6854  BeginValue(false);
6855  m_SB.Add('[');
6856 
6857  StackItem item;
6858  item.type = COLLECTION_TYPE_ARRAY;
6859  item.valueCount = 0;
6860  item.singleLineMode = singleLine;
6861  m_Stack.push_back(item);
6862 }
6863 
6864 void VmaJsonWriter::EndArray()
6865 {
6866  VMA_ASSERT(!m_InsideString);
6867 
6868  WriteIndent(true);
6869  m_SB.Add(']');
6870 
6871  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
6872  m_Stack.pop_back();
6873 }
6874 
// Writes a complete string value: open quote, escaped content, close quote.
void VmaJsonWriter::WriteString(const char* pStr)
{
    BeginString(pStr);
    EndString();
}
6880 
6881 void VmaJsonWriter::BeginString(const char* pStr)
6882 {
6883  VMA_ASSERT(!m_InsideString);
6884 
6885  BeginValue(true);
6886  m_SB.Add('"');
6887  m_InsideString = true;
6888  if(pStr != VMA_NULL && pStr[0] != '\0')
6889  {
6890  ContinueString(pStr);
6891  }
6892 }
6893 
6894 void VmaJsonWriter::ContinueString(const char* pStr)
6895 {
6896  VMA_ASSERT(m_InsideString);
6897 
6898  const size_t strLen = strlen(pStr);
6899  for(size_t i = 0; i < strLen; ++i)
6900  {
6901  char ch = pStr[i];
6902  if(ch == '\\')
6903  {
6904  m_SB.Add("\\\\");
6905  }
6906  else if(ch == '"')
6907  {
6908  m_SB.Add("\\\"");
6909  }
6910  else if(ch >= 32)
6911  {
6912  m_SB.Add(ch);
6913  }
6914  else switch(ch)
6915  {
6916  case '\b':
6917  m_SB.Add("\\b");
6918  break;
6919  case '\f':
6920  m_SB.Add("\\f");
6921  break;
6922  case '\n':
6923  m_SB.Add("\\n");
6924  break;
6925  case '\r':
6926  m_SB.Add("\\r");
6927  break;
6928  case '\t':
6929  m_SB.Add("\\t");
6930  break;
6931  default:
6932  VMA_ASSERT(0 && "Character not currently supported.");
6933  break;
6934  }
6935  }
6936 }
6937 
// Appends a decimal number to the currently open string.
void VmaJsonWriter::ContinueString(uint32_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
6943 
// Appends a decimal number to the currently open string.
void VmaJsonWriter::ContinueString(uint64_t n)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddNumber(n);
}
6949 
// Appends a pointer's textual representation to the currently open string.
void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
{
    VMA_ASSERT(m_InsideString);
    m_SB.AddPointer(ptr);
}
6955 
6956 void VmaJsonWriter::EndString(const char* pStr)
6957 {
6958  VMA_ASSERT(m_InsideString);
6959  if(pStr != VMA_NULL && pStr[0] != '\0')
6960  {
6961  ContinueString(pStr);
6962  }
6963  m_SB.Add('"');
6964  m_InsideString = false;
6965 }
6966 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint32_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
6973 
// Writes a complete numeric value.
void VmaJsonWriter::WriteNumber(uint64_t n)
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.AddNumber(n);
}
6980 
6981 void VmaJsonWriter::WriteBool(bool b)
6982 {
6983  VMA_ASSERT(!m_InsideString);
6984  BeginValue(false);
6985  m_SB.Add(b ? "true" : "false");
6986 }
6987 
// Writes the literal "null" as a complete value.
void VmaJsonWriter::WriteNull()
{
    VMA_ASSERT(!m_InsideString);
    BeginValue(false);
    m_SB.Add("null");
}
6994 
// Writes whatever must precede a new value in the current collection:
// ": " after an object key, ", " + newline between sibling values, or just
// indentation before the first value. Also enforces that object keys
// (the even-indexed values of an object) are strings.
void VmaJsonWriter::BeginValue(bool isString)
{
    if(!m_Stack.empty())
    {
        StackItem& currItem = m_Stack.back();
        // Inside an object, values alternate key/value; even counts mean the
        // next value is a key and therefore must be a string.
        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 == 0)
        {
            VMA_ASSERT(isString);
        }

        if(currItem.type == COLLECTION_TYPE_OBJECT &&
            currItem.valueCount % 2 != 0)
        {
            // This value follows its key on the same line.
            m_SB.Add(": ");
        }
        else if(currItem.valueCount > 0)
        {
            m_SB.Add(", ");
            WriteIndent();
        }
        else
        {
            WriteIndent();
        }
        ++currItem.valueCount;
    }
}
7023 
7024 void VmaJsonWriter::WriteIndent(bool oneLess)
7025 {
7026  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
7027  {
7028  m_SB.AddNewLine();
7029 
7030  size_t count = m_Stack.size();
7031  if(count > 0 && oneLess)
7032  {
7033  --count;
7034  }
7035  for(size_t i = 0; i < count; ++i)
7036  {
7037  m_SB.Add(INDENT);
7038  }
7039  }
7040 }
7041 
7042 #endif // #if VMA_STATS_STRING_ENABLED
7043 
7045 
// Stores the user data pointer. When the allocation was created with the
// user-data-as-string flag, the allocation owns a heap copy of the string:
// the previous copy is freed and pUserData (if not null) is duplicated.
// Otherwise the raw pointer is stored as-is, without ownership.
void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
{
    if(IsUserDataString())
    {
        // Passing back the currently stored copy would be a use-after-free below.
        VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);

        FreeUserDataString(hAllocator);

        if(pUserData != VMA_NULL)
        {
            const char* const newStrSrc = (char*)pUserData;
            const size_t newStrLen = strlen(newStrSrc);
            char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
            memcpy(newStrDst, newStrSrc, newStrLen + 1); // Includes the terminating zero.
            m_pUserData = newStrDst;
        }
    }
    else
    {
        m_pUserData = pUserData;
    }
}
7068 
// Moves this block-type allocation to a different block/offset
// (used e.g. during defragmentation).
void VmaAllocation_T::ChangeBlockAllocation(
    VmaAllocator hAllocator,
    VmaDeviceMemoryBlock* block,
    VkDeviceSize offset)
{
    VMA_ASSERT(block != VMA_NULL);
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);

    // Move mapping reference counter from old block to new block.
    if(block != m_BlockAllocation.m_Block)
    {
        uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
        if(IsPersistentMap())
            ++mapRefCount;
        // Order matters: release references on the old block before acquiring
        // them on the new one.
        m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
        block->Map(hAllocator, mapRefCount, VMA_NULL);
    }

    m_BlockAllocation.m_Block = block;
    m_BlockAllocation.m_Offset = offset;
}
7090 
// Updates the recorded size of this allocation (caller has already resized
// the underlying suballocation).
void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
{
    VMA_ASSERT(newSize > 0);
    m_Size = newSize;
}
7096 
// Updates the offset of this block-type allocation within its block.
void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    m_BlockAllocation.m_Offset = newOffset;
}
7102 
7103 VkDeviceSize VmaAllocation_T::GetOffset() const
7104 {
7105  switch(m_Type)
7106  {
7107  case ALLOCATION_TYPE_BLOCK:
7108  return m_BlockAllocation.m_Offset;
7109  case ALLOCATION_TYPE_DEDICATED:
7110  return 0;
7111  default:
7112  VMA_ASSERT(0);
7113  return 0;
7114  }
7115 }
7116 
7117 VkDeviceMemory VmaAllocation_T::GetMemory() const
7118 {
7119  switch(m_Type)
7120  {
7121  case ALLOCATION_TYPE_BLOCK:
7122  return m_BlockAllocation.m_Block->GetDeviceMemory();
7123  case ALLOCATION_TYPE_DEDICATED:
7124  return m_DedicatedAllocation.m_hMemory;
7125  default:
7126  VMA_ASSERT(0);
7127  return VK_NULL_HANDLE;
7128  }
7129 }
7130 
7131 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
7132 {
7133  switch(m_Type)
7134  {
7135  case ALLOCATION_TYPE_BLOCK:
7136  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
7137  case ALLOCATION_TYPE_DEDICATED:
7138  return m_DedicatedAllocation.m_MemoryTypeIndex;
7139  default:
7140  VMA_ASSERT(0);
7141  return UINT32_MAX;
7142  }
7143 }
7144 
7145 void* VmaAllocation_T::GetMappedData() const
7146 {
7147  switch(m_Type)
7148  {
7149  case ALLOCATION_TYPE_BLOCK:
7150  if(m_MapCount != 0)
7151  {
7152  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
7153  VMA_ASSERT(pBlockData != VMA_NULL);
7154  return (char*)pBlockData + m_BlockAllocation.m_Offset;
7155  }
7156  else
7157  {
7158  return VMA_NULL;
7159  }
7160  break;
7161  case ALLOCATION_TYPE_DEDICATED:
7162  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
7163  return m_DedicatedAllocation.m_pMappedData;
7164  default:
7165  VMA_ASSERT(0);
7166  return VMA_NULL;
7167  }
7168 }
7169 
7170 bool VmaAllocation_T::CanBecomeLost() const
7171 {
7172  switch(m_Type)
7173  {
7174  case ALLOCATION_TYPE_BLOCK:
7175  return m_BlockAllocation.m_CanBecomeLost;
7176  case ALLOCATION_TYPE_DEDICATED:
7177  return false;
7178  default:
7179  VMA_ASSERT(0);
7180  return false;
7181  }
7182 }
7183 
// Returns the custom pool this block-type allocation belongs to
// (VK_NULL_HANDLE for default pools).
VmaPool VmaAllocation_T::GetPool() const
{
    VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    return m_BlockAllocation.m_hPool;
}
7189 
// Atomically marks this allocation as lost if it has not been used within
// the last frameInUseCount frames. Returns true if it became lost.
// Lock-free: retries the compare-exchange until it either succeeds or the
// allocation turns out to be recently used / already lost.
bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    VMA_ASSERT(CanBecomeLost());

    /*
    Warning: This is a carefully designed algorithm.
    Do not modify unless you really know what you're doing :)
    */
    uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    for(;;)
    {
        if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
        {
            // Already lost - caller should not be asking.
            VMA_ASSERT(0);
            return false;
        }
        else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
        {
            // Still potentially in use by the GPU - cannot be made lost.
            return false;
        }
        else // Last use time earlier than current time.
        {
            if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
            {
                // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
                // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
                return true;
            }
            // CAS failed: localLastUseFrameIndex was refreshed by
            // CompareExchangeLastUseFrameIndex - re-evaluate and retry.
        }
    }
}
7221 
7222 #if VMA_STATS_STRING_ENABLED
7223 
7224 // Correspond to values of enum VmaSuballocationType.
// Correspond to values of enum VmaSuballocationType.
// Indexed directly by the enum; used when emitting the detailed statistics JSON.
static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    "FREE",
    "UNKNOWN",
    "BUFFER",
    "IMAGE_UNKNOWN",
    "IMAGE_LINEAR",
    "IMAGE_OPTIMAL",
};
7233 
// Emits this allocation's properties as key/value pairs into an already-open
// JSON object (the caller owns BeginObject/EndObject).
void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
{
    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);

    json.WriteString("Size");
    json.WriteNumber(m_Size);

    if(m_pUserData != VMA_NULL)
    {
        json.WriteString("UserData");
        if(IsUserDataString())
        {
            // User data is an owned string - print its contents.
            json.WriteString((const char*)m_pUserData);
        }
        else
        {
            // Opaque pointer - print its address.
            json.BeginString();
            json.ContinueString_Pointer(m_pUserData);
            json.EndString();
        }
    }

    json.WriteString("CreationFrameIndex");
    json.WriteNumber(m_CreationFrameIndex);

    json.WriteString("LastUseFrameIndex");
    json.WriteNumber(GetLastUseFrameIndex());

    if(m_BufferImageUsage != 0)
    {
        json.WriteString("Usage");
        json.WriteNumber(m_BufferImageUsage);
    }
}
7269 
7270 #endif
7271 
7272 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
7273 {
7274  VMA_ASSERT(IsUserDataString());
7275  if(m_pUserData != VMA_NULL)
7276  {
7277  char* const oldStr = (char*)m_pUserData;
7278  const size_t oldStrLen = strlen(oldStr);
7279  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
7280  m_pUserData = VMA_NULL;
7281  }
7282 }
7283 
7284 void VmaAllocation_T::BlockAllocMap()
7285 {
7286  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7287 
7288  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
7289  {
7290  ++m_MapCount;
7291  }
7292  else
7293  {
7294  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
7295  }
7296 }
7297 
7298 void VmaAllocation_T::BlockAllocUnmap()
7299 {
7300  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
7301 
7302  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
7303  {
7304  --m_MapCount;
7305  }
7306  else
7307  {
7308  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
7309  }
7310 }
7311 
// Maps a dedicated allocation. The first call performs the actual
// vkMapMemory of the whole range; subsequent calls only bump the reference
// counter and return the cached pointer.
VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if(m_MapCount != 0)
    {
        // Already mapped: reuse cached pointer if the 7-bit counter has room.
        if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
        {
            VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
            *ppData = m_DedicatedAllocation.m_pMappedData;
            ++m_MapCount;
            return VK_SUCCESS;
        }
        else
        {
            VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
            return VK_ERROR_MEMORY_MAP_FAILED;
        }
    }
    else
    {
        VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
            hAllocator->m_hDevice,
            m_DedicatedAllocation.m_hMemory,
            0, // offset
            VK_WHOLE_SIZE,
            0, // flags
            ppData);
        if(result == VK_SUCCESS)
        {
            // Cache the pointer for later nested Map calls.
            m_DedicatedAllocation.m_pMappedData = *ppData;
            m_MapCount = 1;
        }
        return result;
    }
}
7348 
// Unmaps a dedicated allocation. The actual vkUnmapMemory happens only when
// the reference counter drops to zero.
void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
{
    VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);

    if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    {
        --m_MapCount;
        if(m_MapCount == 0)
        {
            // Clear the cached pointer before the Vulkan call so state is
            // consistent even if the driver call is observed concurrently.
            m_DedicatedAllocation.m_pMappedData = VMA_NULL;
            (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
                hAllocator->m_hDevice,
                m_DedicatedAllocation.m_hMemory);
        }
    }
    else
    {
        VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    }
}
7369 
7370 #if VMA_STATS_STRING_ENABLED
7371 
// Serializes one VmaStatInfo as a JSON object. Min/Avg/Max sub-objects are
// emitted only when there is more than one allocation / unused range, since
// with <= 1 item they carry no extra information.
static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
{
    json.BeginObject();

    json.WriteString("Blocks");
    json.WriteNumber(stat.blockCount);

    json.WriteString("Allocations");
    json.WriteNumber(stat.allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber(stat.unusedRangeCount);

    json.WriteString("UsedBytes");
    json.WriteNumber(stat.usedBytes);

    json.WriteString("UnusedBytes");
    json.WriteNumber(stat.unusedBytes);

    if(stat.allocationCount > 1)
    {
        json.WriteString("AllocationSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.allocationSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.allocationSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.allocationSizeMax);
        json.EndObject();
    }

    if(stat.unusedRangeCount > 1)
    {
        json.WriteString("UnusedRangeSize");
        json.BeginObject(true);
        json.WriteString("Min");
        json.WriteNumber(stat.unusedRangeSizeMin);
        json.WriteString("Avg");
        json.WriteNumber(stat.unusedRangeSizeAvg);
        json.WriteString("Max");
        json.WriteNumber(stat.unusedRangeSizeMax);
        json.EndObject();
    }

    json.EndObject();
}
7419 
7420 #endif // #if VMA_STATS_STRING_ENABLED
7421 
// Comparator ordering suballocation-list iterators by the size of the
// suballocation they point to. The heterogeneous overload allows binary
// search to compare an iterator directly against a VkDeviceSize.
struct VmaSuballocationItemSizeLess
{
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        const VmaSuballocationList::iterator rhs) const
    {
        return lhs->size < rhs->size;
    }
    bool operator()(
        const VmaSuballocationList::iterator lhs,
        VkDeviceSize rhsSize) const
    {
        return lhs->size < rhsSize;
    }
};
7437 
7438 
7440 // class VmaBlockMetadata
7441 
// Base metadata: size is set later via Init(); allocation callbacks are
// cached for use by derived algorithms.
VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    m_Size(0),
    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
{
}
7447 
7448 #if VMA_STATS_STRING_ENABLED
7449 
// Opens the detailed-map JSON object for one block: writes summary totals and
// begins the "Suballocations" array. Must be closed with PrintDetailedMap_End.
void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    VkDeviceSize unusedBytes,
    size_t allocationCount,
    size_t unusedRangeCount) const
{
    json.BeginObject();

    json.WriteString("TotalBytes");
    json.WriteNumber(GetSize());

    json.WriteString("UnusedBytes");
    json.WriteNumber(unusedBytes);

    json.WriteString("Allocations");
    json.WriteNumber((uint64_t)allocationCount);

    json.WriteString("UnusedRanges");
    json.WriteNumber((uint64_t)unusedRangeCount);

    json.WriteString("Suballocations");
    json.BeginArray();
}
7472 
// Writes one used suballocation as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VmaAllocation hAllocation) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    // Remaining fields (type, size, user data, ...) come from the allocation itself.
    hAllocation->PrintParameters(json);

    json.EndObject();
}
7486 
// Writes one free range as a single-line JSON object inside the
// "Suballocations" array opened by PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    VkDeviceSize offset,
    VkDeviceSize size) const
{
    json.BeginObject(true);

    json.WriteString("Offset");
    json.WriteNumber(offset);

    json.WriteString("Type");
    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);

    json.WriteString("Size");
    json.WriteNumber(size);

    json.EndObject();
}
7504 
// Closes the "Suballocations" array and the block object opened by
// PrintDetailedMap_Begin.
void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
{
    json.EndArray();
    json.EndObject();
}
7510 
7511 #endif // #if VMA_STATS_STRING_ENABLED
7512 
7514 // class VmaBlockMetadata_Generic
7515 
// Generic (free-list) metadata algorithm. m_Suballocations holds all ranges
// in offset order; m_FreeSuballocationsBySize indexes the free ones sorted
// by size for best-fit search.
VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_FreeCount(0),
    m_SumFreeSize(0),
    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
{
}
7524 
// Containers clean up after themselves; nothing else to release here.
VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
{
}
7528 
// Initializes metadata for a freshly created block: one free suballocation
// spanning the whole block, registered in the by-size index.
void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);

    m_FreeCount = 1;
    m_SumFreeSize = size;

    VmaSuballocation suballoc = {};
    suballoc.offset = 0;
    suballoc.size = size;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Blocks are always large enough that their initial free range qualifies
    // for registration in m_FreeSuballocationsBySize.
    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    m_Suballocations.push_back(suballoc);
    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    --suballocItem; // Iterator to the element just appended.
    m_FreeSuballocationsBySize.push_back(suballocItem);
}
7548 
// Full consistency check of this block's metadata. Returns true if valid;
// each VMA_VALIDATE returns false on the first violated invariant.
// Checked invariants: suballocations are contiguous and cover the block,
// no two free ranges are adjacent, counters/sums match the list, and the
// by-size index contains exactly the registerable free ranges in ascending
// size order.
bool VmaBlockMetadata_Generic::Validate() const
{
    VMA_VALIDATE(!m_Suballocations.empty());

    // Expected offset of new suballocation as calculated from previous ones.
    VkDeviceSize calculatedOffset = 0;
    // Expected number of free suballocations as calculated from traversing their list.
    uint32_t calculatedFreeCount = 0;
    // Expected sum size of free suballocations as calculated from traversing their list.
    VkDeviceSize calculatedSumFreeSize = 0;
    // Expected number of free suballocations that should be registered in
    // m_FreeSuballocationsBySize calculated from traversing their list.
    size_t freeSuballocationsToRegister = 0;
    // True if previous visited suballocation was free.
    bool prevFree = false;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& subAlloc = *suballocItem;

        // Actual offset of this suballocation doesn't match expected one.
        VMA_VALIDATE(subAlloc.offset == calculatedOffset);

        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
        // Two adjacent free suballocations are invalid. They should be merged.
        VMA_VALIDATE(!prevFree || !currFree);

        // A suballocation is free exactly when it has no owning allocation handle.
        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));

        if(currFree)
        {
            calculatedSumFreeSize += subAlloc.size;
            ++calculatedFreeCount;
            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
            {
                ++freeSuballocationsToRegister;
            }

            // Margin required between allocations - every free space must be at least that large.
            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
        }
        else
        {
            // The owning allocation must agree with this range's placement.
            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);

            // Margin required between allocations - previous allocation must be free.
            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
        }

        calculatedOffset += subAlloc.size;
        prevFree = currFree;
    }

    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    // match expected one.
    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);

    VkDeviceSize lastSize = 0;
    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    {
        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];

        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
        // They must be sorted by size ascending.
        VMA_VALIDATE(suballocItem->size >= lastSize);

        lastSize = suballocItem->size;
    }

    // Check if totals match calculated values.
    VMA_VALIDATE(ValidateFreeSuballocationList());
    VMA_VALIDATE(calculatedOffset == GetSize());
    VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    VMA_VALIDATE(calculatedFreeCount == m_FreeCount);

    return true;
}
7630 
7631 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
7632 {
7633  if(!m_FreeSuballocationsBySize.empty())
7634  {
7635  return m_FreeSuballocationsBySize.back()->size;
7636  }
7637  else
7638  {
7639  return 0;
7640  }
7641 }
7642 
7643 bool VmaBlockMetadata_Generic::IsEmpty() const
7644 {
7645  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
7646 }
7647 
// Fills outInfo with statistics for this single block: counts, byte totals,
// and min/max sizes of used and free ranges. Averages are computed later by
// the caller when aggregating across blocks.
void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
{
    outInfo.blockCount = 1;

    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    outInfo.allocationCount = rangeCount - m_FreeCount;
    outInfo.unusedRangeCount = m_FreeCount;

    outInfo.unusedBytes = m_SumFreeSize;
    outInfo.usedBytes = GetSize() - outInfo.unusedBytes;

    // Start min at max (and max at 0) so the first range initializes them.
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.allocationSizeMax = 0;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMax = 0;

    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem)
    {
        const VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
        }
        else
        {
            outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
            outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
        }
    }
}
7681 
// Accumulates this block's contribution into pool-wide statistics.
void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
{
    const uint32_t rangeCount = (uint32_t)m_Suballocations.size();

    inoutStats.size += GetSize();
    inoutStats.unusedSize += m_SumFreeSize;
    inoutStats.allocationCount += rangeCount - m_FreeCount;
    inoutStats.unusedRangeCount += m_FreeCount;
    // Pool-wide maximum, not a sum.
    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
}
7692 
7693 #if VMA_STATS_STRING_ENABLED
7694 
// Emits this block's full suballocation map (used and free ranges, in offset
// order) as JSON, using the shared PrintDetailedMap_* helpers.
void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
{
    PrintDetailedMap_Begin(json,
        m_SumFreeSize, // unusedBytes
        m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
        m_FreeCount); // unusedRangeCount

    size_t i = 0;
    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
        suballocItem != m_Suballocations.cend();
        ++suballocItem, ++i)
    {
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
        }
        else
        {
            PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
        }
    }

    PrintDetailedMap_End(json);
}
7719 
7720 #endif // #if VMA_STATS_STRING_ENABLED
7721 
// Searches this block for a place to put an allocation of allocSize with
// allocAlignment. Returns false if no place was found; on success fills
// *pAllocationRequest (the allocation itself is performed later by Alloc()).
// When canMakeOtherLost, also considers positions that require making other
// lost-enabled allocations lost, choosing the cheapest by CalcCost().
//
// NOTE(review): this copy of the file appears to have lost two lines to the
// extraction process (marked below) - verify against the upstream source
// before modifying this function.
bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(!upperAddress); // Upper-address placement is not supported by this algorithm.
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    // There is not enough total free space in this block to fullfill the request: Early return.
    if(canMakeOtherLost == false &&
        m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    {
        return false;
    }

    // New algorithm, efficiently searching freeSuballocationsBySize.
    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    if(freeSuballocCount > 0)
    {
        // NOTE(review): the condition that should precede this brace
        // (presumably a best-fit strategy test) is missing here.
        {
            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
                m_FreeSuballocationsBySize.data(),
                m_FreeSuballocationsBySize.data() + freeSuballocCount,
                allocSize + 2 * VMA_DEBUG_MARGIN,
                VmaSuballocationItemSizeLess());
            size_t index = it - m_FreeSuballocationsBySize.data();
            // Best-fit: try candidates from smallest sufficient size upward.
            for(; index < freeSuballocCount; ++index)
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
        else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
        {
            // Lowest-offset-first: walk the suballocation list in address order.
            for(VmaSuballocationList::iterator it = m_Suballocations.begin();
                it != m_Suballocations.end();
                ++it)
            {
                if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    it,
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = it;
                    return true;
                }
            }
        }
        else // WORST_FIT, FIRST_FIT
        {
            // Search staring from biggest suballocations.
            for(size_t index = freeSuballocCount; index--; )
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    m_FreeSuballocationsBySize[index],
                    false, // canMakeOtherLost
                    &pAllocationRequest->offset,
                    &pAllocationRequest->itemsToMakeLostCount,
                    &pAllocationRequest->sumFreeSize,
                    &pAllocationRequest->sumItemSize))
                {
                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
                    return true;
                }
            }
        }
    }

    if(canMakeOtherLost)
    {
        // Brute-force algorithm. TODO: Come up with something better.

        // VK_WHOLE_SIZE acts as "no candidate found yet" sentinel for CalcCost comparison.
        pAllocationRequest->sumFreeSize = VK_WHOLE_SIZE;
        pAllocationRequest->sumItemSize = VK_WHOLE_SIZE;

        VmaAllocationRequest tmpAllocRequest = {};
        for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
            suballocIt != m_Suballocations.end();
            ++suballocIt)
        {
            if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
                suballocIt->hAllocation->CanBecomeLost())
            {
                if(CheckAllocation(
                    currentFrameIndex,
                    frameInUseCount,
                    bufferImageGranularity,
                    allocSize,
                    allocAlignment,
                    allocType,
                    suballocIt,
                    canMakeOtherLost,
                    &tmpAllocRequest.offset,
                    &tmpAllocRequest.itemsToMakeLostCount,
                    &tmpAllocRequest.sumFreeSize,
                    &tmpAllocRequest.sumItemSize))
                {
                    tmpAllocRequest.item = suballocIt;

                    // NOTE(review): the right-hand operand of this '||'
                    // (presumably a first-fit strategy test) is missing here.
                    if(tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost() ||
                    {
                        *pAllocationRequest = tmpAllocRequest;
                    }
                }
            }
        }

        if(pAllocationRequest->sumItemSize != VK_WHOLE_SIZE)
        {
            return true;
        }
    }

    return false;
}
7880 
// Makes lost the allocations that the request counted on (itemsToMakeLostCount
// of them, starting at pAllocationRequest->item). Returns false if any of them
// was used again in the meantime and could not be made lost; the request is
// then invalid. On success the request's item points at the merged free range.
bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    while(pAllocationRequest->itemsToMakeLostCount > 0)
    {
        // Skip over a free range to reach the next used suballocation.
        // (Adjacent free ranges cannot occur - they are always merged.)
        if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            ++pAllocationRequest->item;
        }
        VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
        VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
        VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
        if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
        {
            // FreeSuballocation merges neighbors and returns the resulting free item.
            pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
            --pAllocationRequest->itemsToMakeLostCount;
        }
        else
        {
            return false;
        }
    }

    VMA_HEAVY_ASSERT(Validate());
    VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);

    return true;
}
7912 
7913 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
7914 {
7915  uint32_t lostAllocationCount = 0;
7916  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7917  it != m_Suballocations.end();
7918  ++it)
7919  {
7920  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
7921  it->hAllocation->CanBecomeLost() &&
7922  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
7923  {
7924  it = FreeSuballocation(it);
7925  ++lostAllocationCount;
7926  }
7927  }
7928  return lostAllocationCount;
7929 }
7930 
7931 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
7932 {
7933  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
7934  it != m_Suballocations.end();
7935  ++it)
7936  {
7937  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
7938  {
7939  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
7940  {
7941  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
7942  return VK_ERROR_VALIDATION_FAILED_EXT;
7943  }
7944  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
7945  {
7946  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
7947  return VK_ERROR_VALIDATION_FAILED_EXT;
7948  }
7949  }
7950  }
7951 
7952  return VK_SUCCESS;
7953 }
7954 
7955 void VmaBlockMetadata_Generic::Alloc(
7956  const VmaAllocationRequest& request,
7957  VmaSuballocationType type,
7958  VkDeviceSize allocSize,
7959  bool upperAddress,
7960  VmaAllocation hAllocation)
7961 {
7962  VMA_ASSERT(!upperAddress);
7963  VMA_ASSERT(request.item != m_Suballocations.end());
7964  VmaSuballocation& suballoc = *request.item;
7965  // Given suballocation is a free block.
7966  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
7967  // Given offset is inside this suballocation.
7968  VMA_ASSERT(request.offset >= suballoc.offset);
7969  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
7970  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
7971  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
7972 
7973  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
7974  // it to become used.
7975  UnregisterFreeSuballocation(request.item);
7976 
7977  suballoc.offset = request.offset;
7978  suballoc.size = allocSize;
7979  suballoc.type = type;
7980  suballoc.hAllocation = hAllocation;
7981 
7982  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
7983  if(paddingEnd)
7984  {
7985  VmaSuballocation paddingSuballoc = {};
7986  paddingSuballoc.offset = request.offset + allocSize;
7987  paddingSuballoc.size = paddingEnd;
7988  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
7989  VmaSuballocationList::iterator next = request.item;
7990  ++next;
7991  const VmaSuballocationList::iterator paddingEndItem =
7992  m_Suballocations.insert(next, paddingSuballoc);
7993  RegisterFreeSuballocation(paddingEndItem);
7994  }
7995 
7996  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
7997  if(paddingBegin)
7998  {
7999  VmaSuballocation paddingSuballoc = {};
8000  paddingSuballoc.offset = request.offset - paddingBegin;
8001  paddingSuballoc.size = paddingBegin;
8002  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8003  const VmaSuballocationList::iterator paddingBeginItem =
8004  m_Suballocations.insert(request.item, paddingSuballoc);
8005  RegisterFreeSuballocation(paddingBeginItem);
8006  }
8007 
8008  // Update totals.
8009  m_FreeCount = m_FreeCount - 1;
8010  if(paddingBegin > 0)
8011  {
8012  ++m_FreeCount;
8013  }
8014  if(paddingEnd > 0)
8015  {
8016  ++m_FreeCount;
8017  }
8018  m_SumFreeSize -= allocSize;
8019 }
8020 
8021 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
8022 {
8023  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8024  suballocItem != m_Suballocations.end();
8025  ++suballocItem)
8026  {
8027  VmaSuballocation& suballoc = *suballocItem;
8028  if(suballoc.hAllocation == allocation)
8029  {
8030  FreeSuballocation(suballocItem);
8031  VMA_HEAVY_ASSERT(Validate());
8032  return;
8033  }
8034  }
8035  VMA_ASSERT(0 && "Not found!");
8036 }
8037 
8038 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
8039 {
8040  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
8041  suballocItem != m_Suballocations.end();
8042  ++suballocItem)
8043  {
8044  VmaSuballocation& suballoc = *suballocItem;
8045  if(suballoc.offset == offset)
8046  {
8047  FreeSuballocation(suballocItem);
8048  return;
8049  }
8050  }
8051  VMA_ASSERT(0 && "Not found!");
8052 }
8053 
// Resizes an existing allocation in place to newSize without moving it.
// Shrinking always succeeds: the reclaimed space is merged into a following
// free item or becomes a new free item. Growing succeeds only when a free
// item directly follows the allocation and is large enough (including
// VMA_DEBUG_MARGIN); otherwise returns false with the block unchanged.
// Returns false (after asserting) if alloc is not found in this block.
// NOTE(review): caller is expected to update alloc's own recorded size
// afterwards — Validate() cannot be called here for that reason (see below).
bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
{
    typedef VmaSuballocationList::iterator iter_type;
    for(iter_type suballocItem = m_Suballocations.begin();
        suballocItem != m_Suballocations.end();
        ++suballocItem)
    {
        VmaSuballocation& suballoc = *suballocItem;
        if(suballoc.hAllocation == alloc)
        {
            iter_type nextItem = suballocItem;
            ++nextItem;

            // Should have been ensured on higher level.
            VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);

            // Shrinking.
            if(newSize < alloc->GetSize())
            {
                const VkDeviceSize sizeDiff = suballoc.size - newSize;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // Grow this next item backward.
                        UnregisterFreeSuballocation(nextItem);
                        nextItem->offset -= sizeDiff;
                        nextItem->size += sizeDiff;
                        RegisterFreeSuballocation(nextItem);
                    }
                    // Next item is not free.
                    else
                    {
                        // Create free item after current one.
                        VmaSuballocation newFreeSuballoc;
                        newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                        newFreeSuballoc.offset = suballoc.offset + newSize;
                        newFreeSuballoc.size = sizeDiff;
                        newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                        iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
                        RegisterFreeSuballocation(newFreeSuballocIt);

                        ++m_FreeCount;
                    }
                }
                // This is the last item.
                else
                {
                    // Create free item at the end.
                    VmaSuballocation newFreeSuballoc;
                    newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
                    newFreeSuballoc.offset = suballoc.offset + newSize;
                    newFreeSuballoc.size = sizeDiff;
                    newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                    m_Suballocations.push_back(newFreeSuballoc);

                    iter_type newFreeSuballocIt = m_Suballocations.end();
                    RegisterFreeSuballocation(--newFreeSuballocIt);

                    ++m_FreeCount;
                }

                suballoc.size = newSize;
                m_SumFreeSize += sizeDiff;
            }
            // Growing.
            else
            {
                const VkDeviceSize sizeDiff = newSize - suballoc.size;

                // There is next item.
                if(nextItem != m_Suballocations.end())
                {
                    // Next item is free.
                    if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // There is not enough free space, including margin.
                        if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
                        {
                            return false;
                        }

                        // There is more free space than required.
                        if(nextItem->size > sizeDiff)
                        {
                            // Move and shrink this next item.
                            UnregisterFreeSuballocation(nextItem);
                            nextItem->offset += sizeDiff;
                            nextItem->size -= sizeDiff;
                            RegisterFreeSuballocation(nextItem);
                        }
                        // There is exactly the amount of free space required.
                        else
                        {
                            // Remove this next free item.
                            UnregisterFreeSuballocation(nextItem);
                            m_Suballocations.erase(nextItem);
                            --m_FreeCount;
                        }
                    }
                    // Next item is not free - there is no space to grow.
                    else
                    {
                        return false;
                    }
                }
                // This is the last item - there is no space to grow.
                else
                {
                    return false;
                }

                suballoc.size = newSize;
                m_SumFreeSize -= sizeDiff;
            }

            // We cannot call Validate() here because alloc object is updated to new size outside of this call.
            return true;
        }
    }
    VMA_ASSERT(0 && "Not found!");
    return false;
}
8180 
8181 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
8182 {
8183  VkDeviceSize lastSize = 0;
8184  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
8185  {
8186  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
8187 
8188  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
8189  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8190  VMA_VALIDATE(it->size >= lastSize);
8191  lastSize = it->size;
8192  }
8193  return true;
8194 }
8195 
/*
Checks whether an allocation of allocSize bytes with allocAlignment and
allocType can be placed starting at suballocItem, and computes its final
aligned offset.

- canMakeOtherLost == false: suballocItem must itself be a free item large
  enough (with VMA_DEBUG_MARGIN and bufferImageGranularity respected).
- canMakeOtherLost == true: suballocItem and following items may be used
  allocations that can be made lost; *itemsToMakeLostCount receives how many
  would need to be lost, while *pSumFreeSize / *pSumItemSize accumulate free
  vs. to-be-lost bytes so the caller can compare candidate costs.

Returns true on success with the chosen offset written to *pOffset; returns
false if the request cannot be satisfied starting at this item.
*/
bool VmaBlockMetadata_Generic::CheckAllocation(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    VmaSuballocationType allocType,
    VmaSuballocationList::const_iterator suballocItem,
    bool canMakeOtherLost,
    VkDeviceSize* pOffset,
    size_t* itemsToMakeLostCount,
    VkDeviceSize* pSumFreeSize,
    VkDeviceSize* pSumItemSize) const
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(suballocItem != m_Suballocations.cend());
    VMA_ASSERT(pOffset != VMA_NULL);

    *itemsToMakeLostCount = 0;
    *pSumFreeSize = 0;
    *pSumItemSize = 0;

    if(canMakeOtherLost)
    {
        // The starting item may be free, or a used allocation old enough
        // (past frameInUseCount frames) to be sacrificed.
        if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            *pSumFreeSize = suballocItem->size;
        }
        else
        {
            if(suballocItem->hAllocation->CanBecomeLost() &&
                suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
            {
                ++*itemsToMakeLostCount;
                *pSumItemSize = suballocItem->size;
            }
            else
            {
                return false;
            }
        }

        // Remaining size is too small for this request: Early return.
        if(GetSize() - suballocItem->offset < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballocItem->offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Now that we have final *pOffset, check if we are past suballocItem.
        // If yes, return false - this function should be called for another suballocItem as starting point.
        if(*pOffset >= suballocItem->offset + suballocItem->size)
        {
            return false;
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
        // Another early return check.
        if(suballocItem->offset + totalSize > GetSize())
        {
            return false;
        }

        // Advance lastSuballocItem until desired size is reached.
        // Update itemsToMakeLostCount.
        VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
        if(totalSize > suballocItem->size)
        {
            VkDeviceSize remainingSize = totalSize - suballocItem->size;
            while(remainingSize > 0)
            {
                ++lastSuballocItem;
                if(lastSuballocItem == m_Suballocations.cend())
                {
                    return false;
                }
                if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
                {
                    *pSumFreeSize += lastSuballocItem->size;
                }
                else
                {
                    VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
                    if(lastSuballocItem->hAllocation->CanBecomeLost() &&
                        lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                    {
                        ++*itemsToMakeLostCount;
                        *pSumItemSize += lastSuballocItem->size;
                    }
                    else
                    {
                        return false;
                    }
                }
                remainingSize = (lastSuballocItem->size < remainingSize) ?
                    remainingSize - lastSuballocItem->size : 0;
            }
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, we must mark more allocations lost or fail.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
                        if(nextSuballoc.hAllocation->CanBecomeLost() &&
                            nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++*itemsToMakeLostCount;
                        }
                        else
                        {
                            return false;
                        }
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }
    else
    {
        // canMakeOtherLost == false: the starting item must be a single free
        // suballocation large enough by itself.
        const VmaSuballocation& suballoc = *suballocItem;
        VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        *pSumFreeSize = suballoc.size;

        // Size of this suballocation is too small for this request: Early return.
        if(suballoc.size < allocSize)
        {
            return false;
        }

        // Start from offset equal to beginning of this suballocation.
        *pOffset = suballoc.offset;

        // Apply VMA_DEBUG_MARGIN at the beginning.
        if(VMA_DEBUG_MARGIN > 0)
        {
            *pOffset += VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        *pOffset = VmaAlignUp(*pOffset, allocAlignment);

        // Check previous suballocations for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1)
        {
            bool bufferImageGranularityConflict = false;
            VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
            while(prevSuballocItem != m_Suballocations.cbegin())
            {
                --prevSuballocItem;
                const VmaSuballocation& prevSuballoc = *prevSuballocItem;
                if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
            }
        }

        // Calculate padding at the beginning based on current offset.
        const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;

        // Calculate required margin at the end.
        const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;

        // Fail if requested size plus margin before and after is bigger than size of this suballocation.
        if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
        {
            return false;
        }

        // Check next suballocations for BufferImageGranularity conflicts.
        // If conflict exists, allocation cannot be made here.
        if(bufferImageGranularity > 1)
        {
            VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
            ++nextSuballocItem;
            while(nextSuballocItem != m_Suballocations.cend())
            {
                const VmaSuballocation& nextSuballoc = *nextSuballocItem;
                if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                    {
                        return false;
                    }
                }
                else
                {
                    // Already on next page.
                    break;
                }
                ++nextSuballocItem;
            }
        }
    }

    // All tests passed: Success. pOffset is already filled.
    return true;
}
8469 
8470 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
8471 {
8472  VMA_ASSERT(item != m_Suballocations.end());
8473  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8474 
8475  VmaSuballocationList::iterator nextItem = item;
8476  ++nextItem;
8477  VMA_ASSERT(nextItem != m_Suballocations.end());
8478  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
8479 
8480  item->size += nextItem->size;
8481  --m_FreeCount;
8482  m_Suballocations.erase(nextItem);
8483 }
8484 
// Converts a used suballocation into a free one, merges it with adjacent free
// neighbors, updates m_FreeCount / m_SumFreeSize and the free-by-size index,
// and returns the iterator of the resulting (possibly merged) free item.
VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
{
    // Change this suballocation to be marked as free.
    VmaSuballocation& suballoc = *suballocItem;
    suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    suballoc.hAllocation = VK_NULL_HANDLE;

    // Update totals.
    ++m_FreeCount;
    m_SumFreeSize += suballoc.size;

    // Merge with previous and/or next suballocation if it's also free.
    bool mergeWithNext = false;
    bool mergeWithPrev = false;

    VmaSuballocationList::iterator nextItem = suballocItem;
    ++nextItem;
    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    {
        mergeWithNext = true;
    }

    VmaSuballocationList::iterator prevItem = suballocItem;
    if(suballocItem != m_Suballocations.begin())
    {
        --prevItem;
        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
        {
            mergeWithPrev = true;
        }
    }

    // Merge order matters: absorb the next item into this one first, then let
    // the previous item absorb the (possibly already grown) current item.
    // Each neighbor is unregistered from the size index before its iterator
    // is invalidated by the merge.
    if(mergeWithNext)
    {
        UnregisterFreeSuballocation(nextItem);
        MergeFreeWithNext(suballocItem);
    }

    if(mergeWithPrev)
    {
        UnregisterFreeSuballocation(prevItem);
        MergeFreeWithNext(prevItem);
        RegisterFreeSuballocation(prevItem);
        return prevItem;
    }
    else
    {
        RegisterFreeSuballocation(suballocItem);
        return suballocItem;
    }
}
8536 
8537 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
8538 {
8539  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8540  VMA_ASSERT(item->size > 0);
8541 
8542  // You may want to enable this validation at the beginning or at the end of
8543  // this function, depending on what do you want to check.
8544  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8545 
8546  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8547  {
8548  if(m_FreeSuballocationsBySize.empty())
8549  {
8550  m_FreeSuballocationsBySize.push_back(item);
8551  }
8552  else
8553  {
8554  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
8555  }
8556  }
8557 
8558  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8559 }
8560 
8561 
8562 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
8563 {
8564  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
8565  VMA_ASSERT(item->size > 0);
8566 
8567  // You may want to enable this validation at the beginning or at the end of
8568  // this function, depending on what do you want to check.
8569  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8570 
8571  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
8572  {
8573  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
8574  m_FreeSuballocationsBySize.data(),
8575  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
8576  item,
8577  VmaSuballocationItemSizeLess());
8578  for(size_t index = it - m_FreeSuballocationsBySize.data();
8579  index < m_FreeSuballocationsBySize.size();
8580  ++index)
8581  {
8582  if(m_FreeSuballocationsBySize[index] == item)
8583  {
8584  VmaVectorRemove(m_FreeSuballocationsBySize, index);
8585  return;
8586  }
8587  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
8588  }
8589  VMA_ASSERT(0 && "Not found.");
8590  }
8591 
8592  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
8593 }
8594 
8595 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
8596  VkDeviceSize bufferImageGranularity,
8597  VmaSuballocationType& inOutPrevSuballocType) const
8598 {
8599  if(bufferImageGranularity == 1 || IsEmpty())
8600  {
8601  return false;
8602  }
8603 
8604  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
8605  bool typeConflictFound = false;
8606  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
8607  it != m_Suballocations.cend();
8608  ++it)
8609  {
8610  const VmaSuballocationType suballocType = it->type;
8611  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
8612  {
8613  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
8614  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
8615  {
8616  typeConflictFound = true;
8617  }
8618  inOutPrevSuballocType = suballocType;
8619  }
8620  }
8621 
8622  return typeConflictFound || minAlignment >= bufferImageGranularity;
8623 }
8624 
8626 // class VmaBlockMetadata_Linear
8627 
// Linear block metadata starts empty: free size set later by Init(), the 1st
// vector is index 0, the 2nd vector is unused (SECOND_VECTOR_EMPTY), and no
// null (freed) items are recorded yet.
VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_SumFreeSize(0),
    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    m_1stVectorIndex(0),
    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    m_1stNullItemsBeginCount(0),
    m_1stNullItemsMiddleCount(0),
    m_2ndNullItemsCount(0)
{
}
8640 
// Nothing to release explicitly: the suballocation vectors clean up through
// their own allocators.
VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
{
}
8644 
// Initializes metadata for a block of the given size; the entire block starts
// out as free space.
void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
{
    VmaBlockMetadata::Init(size);
    m_SumFreeSize = size;
}
8650 
// Checks internal consistency of the linear metadata: vector/mode invariants,
// null-item (freed slot) counts, monotonically increasing offsets with
// VMA_DEBUG_MARGIN between items, allocation back-pointers, and that
// m_SumFreeSize matches block size minus used bytes. Returns false (via
// VMA_VALIDATE) on the first violated invariant.
bool VmaBlockMetadata_Linear::Validate() const
{
    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    VMA_VALIDATE(!suballocations1st.empty() ||
        suballocations2nd.empty() ||
        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);

    if(!suballocations1st.empty())
    {
        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    }
    if(!suballocations2nd.empty())
    {
        // Null item at the end should be just pop_back().
        VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    }

    VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());

    VkDeviceSize sumUsedSize = 0;
    const size_t suballoc1stCount = suballocations1st.size();
    // 'offset' advances through the block; every item must start at or after it.
    VkDeviceSize offset = VMA_DEBUG_MARGIN;

    // In ring-buffer mode the 2nd vector's items occupy the space before the
    // 1st vector's items, so they are walked first, in forward order.
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = 0; i < suballoc2ndCount; ++i)
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    // Items skipped at the beginning of the 1st vector must all be free null items.
    for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
            suballoc.hAllocation == VK_NULL_HANDLE);
    }

    size_t nullItem1stCount = m_1stNullItemsBeginCount;

    for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    {
        const VmaSuballocation& suballoc = suballocations1st[i];
        const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

        VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
        VMA_VALIDATE(suballoc.offset >= offset);
        VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);

        if(!currFree)
        {
            VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
            VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
            sumUsedSize += suballoc.size;
        }
        else
        {
            ++nullItem1stCount;
        }

        offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    }
    VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);

    // In double-stack mode the 2nd vector grows downward from the end of the
    // block, so it is walked backward to keep offsets increasing.
    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        const size_t suballoc2ndCount = suballocations2nd.size();
        size_t nullItem2ndCount = 0;
        for(size_t i = suballoc2ndCount; i--; )
        {
            const VmaSuballocation& suballoc = suballocations2nd[i];
            const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);

            VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
            VMA_VALIDATE(suballoc.offset >= offset);

            if(!currFree)
            {
                VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
                VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
                sumUsedSize += suballoc.size;
            }
            else
            {
                ++nullItem2ndCount;
            }

            offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
        }

        VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    }

    VMA_VALIDATE(offset <= GetSize());
    VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);

    return true;
}
8777 
8778 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
8779 {
8780  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
8781  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
8782 }
8783 
8784 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
8785 {
8786  const VkDeviceSize size = GetSize();
8787 
8788  /*
8789  We don't consider gaps inside allocation vectors with freed allocations because
8790  they are not suitable for reuse in linear allocator. We consider only space that
8791  is available for new allocations.
8792  */
8793  if(IsEmpty())
8794  {
8795  return size;
8796  }
8797 
8798  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8799 
8800  switch(m_2ndVectorMode)
8801  {
8802  case SECOND_VECTOR_EMPTY:
8803  /*
8804  Available space is after end of 1st, as well as before beginning of 1st (which
8805  whould make it a ring buffer).
8806  */
8807  {
8808  const size_t suballocations1stCount = suballocations1st.size();
8809  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
8810  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
8811  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
8812  return VMA_MAX(
8813  firstSuballoc.offset,
8814  size - (lastSuballoc.offset + lastSuballoc.size));
8815  }
8816  break;
8817 
8818  case SECOND_VECTOR_RING_BUFFER:
8819  /*
8820  Available space is only between end of 2nd and beginning of 1st.
8821  */
8822  {
8823  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8824  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
8825  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
8826  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
8827  }
8828  break;
8829 
8830  case SECOND_VECTOR_DOUBLE_STACK:
8831  /*
8832  Available space is only between end of 1st and top of 2nd.
8833  */
8834  {
8835  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8836  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
8837  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
8838  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
8839  }
8840  break;
8841 
8842  default:
8843  VMA_ASSERT(0);
8844  return 0;
8845  }
8846 }
8847 
8848 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
8849 {
8850  const VkDeviceSize size = GetSize();
8851  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
8852  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
8853  const size_t suballoc1stCount = suballocations1st.size();
8854  const size_t suballoc2ndCount = suballocations2nd.size();
8855 
8856  outInfo.blockCount = 1;
8857  outInfo.allocationCount = (uint32_t)GetAllocationCount();
8858  outInfo.unusedRangeCount = 0;
8859  outInfo.usedBytes = 0;
8860  outInfo.allocationSizeMin = UINT64_MAX;
8861  outInfo.allocationSizeMax = 0;
8862  outInfo.unusedRangeSizeMin = UINT64_MAX;
8863  outInfo.unusedRangeSizeMax = 0;
8864 
8865  VkDeviceSize lastOffset = 0;
8866 
8867  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
8868  {
8869  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
8870  size_t nextAlloc2ndIndex = 0;
8871  while(lastOffset < freeSpace2ndTo1stEnd)
8872  {
8873  // Find next non-null allocation or move nextAllocIndex to the end.
8874  while(nextAlloc2ndIndex < suballoc2ndCount &&
8875  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8876  {
8877  ++nextAlloc2ndIndex;
8878  }
8879 
8880  // Found non-null allocation.
8881  if(nextAlloc2ndIndex < suballoc2ndCount)
8882  {
8883  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8884 
8885  // 1. Process free space before this allocation.
8886  if(lastOffset < suballoc.offset)
8887  {
8888  // There is free space from lastOffset to suballoc.offset.
8889  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8890  ++outInfo.unusedRangeCount;
8891  outInfo.unusedBytes += unusedRangeSize;
8892  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8893  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8894  }
8895 
8896  // 2. Process this allocation.
8897  // There is allocation with suballoc.offset, suballoc.size.
8898  outInfo.usedBytes += suballoc.size;
8899  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8900  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
8901 
8902  // 3. Prepare for next iteration.
8903  lastOffset = suballoc.offset + suballoc.size;
8904  ++nextAlloc2ndIndex;
8905  }
8906  // We are at the end.
8907  else
8908  {
8909  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
8910  if(lastOffset < freeSpace2ndTo1stEnd)
8911  {
8912  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
8913  ++outInfo.unusedRangeCount;
8914  outInfo.unusedBytes += unusedRangeSize;
8915  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8916  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8917  }
8918 
8919  // End of loop.
8920  lastOffset = freeSpace2ndTo1stEnd;
8921  }
8922  }
8923  }
8924 
8925  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
8926  const VkDeviceSize freeSpace1stTo2ndEnd =
8927  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
8928  while(lastOffset < freeSpace1stTo2ndEnd)
8929  {
8930  // Find next non-null allocation or move nextAllocIndex to the end.
8931  while(nextAlloc1stIndex < suballoc1stCount &&
8932  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
8933  {
8934  ++nextAlloc1stIndex;
8935  }
8936 
8937  // Found non-null allocation.
8938  if(nextAlloc1stIndex < suballoc1stCount)
8939  {
8940  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
8941 
8942  // 1. Process free space before this allocation.
8943  if(lastOffset < suballoc.offset)
8944  {
8945  // There is free space from lastOffset to suballoc.offset.
8946  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
8947  ++outInfo.unusedRangeCount;
8948  outInfo.unusedBytes += unusedRangeSize;
8949  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8950  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8951  }
8952 
8953  // 2. Process this allocation.
8954  // There is allocation with suballoc.offset, suballoc.size.
8955  outInfo.usedBytes += suballoc.size;
8956  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
8957  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
8958 
8959  // 3. Prepare for next iteration.
8960  lastOffset = suballoc.offset + suballoc.size;
8961  ++nextAlloc1stIndex;
8962  }
8963  // We are at the end.
8964  else
8965  {
8966  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
8967  if(lastOffset < freeSpace1stTo2ndEnd)
8968  {
8969  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
8970  ++outInfo.unusedRangeCount;
8971  outInfo.unusedBytes += unusedRangeSize;
8972  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
8973  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
8974  }
8975 
8976  // End of loop.
8977  lastOffset = freeSpace1stTo2ndEnd;
8978  }
8979  }
8980 
8981  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
8982  {
8983  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
8984  while(lastOffset < size)
8985  {
8986  // Find next non-null allocation or move nextAllocIndex to the end.
8987  while(nextAlloc2ndIndex != SIZE_MAX &&
8988  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
8989  {
8990  --nextAlloc2ndIndex;
8991  }
8992 
8993  // Found non-null allocation.
8994  if(nextAlloc2ndIndex != SIZE_MAX)
8995  {
8996  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
8997 
8998  // 1. Process free space before this allocation.
8999  if(lastOffset < suballoc.offset)
9000  {
9001  // There is free space from lastOffset to suballoc.offset.
9002  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9003  ++outInfo.unusedRangeCount;
9004  outInfo.unusedBytes += unusedRangeSize;
9005  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9006  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9007  }
9008 
9009  // 2. Process this allocation.
9010  // There is allocation with suballoc.offset, suballoc.size.
9011  outInfo.usedBytes += suballoc.size;
9012  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9013  outInfo.allocationSizeMax = VMA_MIN(outInfo.allocationSizeMax, suballoc.size);
9014 
9015  // 3. Prepare for next iteration.
9016  lastOffset = suballoc.offset + suballoc.size;
9017  --nextAlloc2ndIndex;
9018  }
9019  // We are at the end.
9020  else
9021  {
9022  // There is free space from lastOffset to size.
9023  if(lastOffset < size)
9024  {
9025  const VkDeviceSize unusedRangeSize = size - lastOffset;
9026  ++outInfo.unusedRangeCount;
9027  outInfo.unusedBytes += unusedRangeSize;
9028  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
9029  outInfo.unusedRangeSizeMax = VMA_MIN(outInfo.unusedRangeSizeMax, unusedRangeSize);
9030  }
9031 
9032  // End of loop.
9033  lastOffset = size;
9034  }
9035  }
9036  }
9037 
9038  outInfo.unusedBytes = size - outInfo.usedBytes;
9039 }
9040 
9041 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
9042 {
9043  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9044  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9045  const VkDeviceSize size = GetSize();
9046  const size_t suballoc1stCount = suballocations1st.size();
9047  const size_t suballoc2ndCount = suballocations2nd.size();
9048 
9049  inoutStats.size += size;
9050 
9051  VkDeviceSize lastOffset = 0;
9052 
9053  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9054  {
9055  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9056  size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
9057  while(lastOffset < freeSpace2ndTo1stEnd)
9058  {
9059  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9060  while(nextAlloc2ndIndex < suballoc2ndCount &&
9061  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9062  {
9063  ++nextAlloc2ndIndex;
9064  }
9065 
9066  // Found non-null allocation.
9067  if(nextAlloc2ndIndex < suballoc2ndCount)
9068  {
9069  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9070 
9071  // 1. Process free space before this allocation.
9072  if(lastOffset < suballoc.offset)
9073  {
9074  // There is free space from lastOffset to suballoc.offset.
9075  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9076  inoutStats.unusedSize += unusedRangeSize;
9077  ++inoutStats.unusedRangeCount;
9078  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9079  }
9080 
9081  // 2. Process this allocation.
9082  // There is allocation with suballoc.offset, suballoc.size.
9083  ++inoutStats.allocationCount;
9084 
9085  // 3. Prepare for next iteration.
9086  lastOffset = suballoc.offset + suballoc.size;
9087  ++nextAlloc2ndIndex;
9088  }
9089  // We are at the end.
9090  else
9091  {
9092  if(lastOffset < freeSpace2ndTo1stEnd)
9093  {
9094  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9095  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9096  inoutStats.unusedSize += unusedRangeSize;
9097  ++inoutStats.unusedRangeCount;
9098  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9099  }
9100 
9101  // End of loop.
9102  lastOffset = freeSpace2ndTo1stEnd;
9103  }
9104  }
9105  }
9106 
9107  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9108  const VkDeviceSize freeSpace1stTo2ndEnd =
9109  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9110  while(lastOffset < freeSpace1stTo2ndEnd)
9111  {
9112  // Find next non-null allocation or move nextAllocIndex to the end.
9113  while(nextAlloc1stIndex < suballoc1stCount &&
9114  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9115  {
9116  ++nextAlloc1stIndex;
9117  }
9118 
9119  // Found non-null allocation.
9120  if(nextAlloc1stIndex < suballoc1stCount)
9121  {
9122  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9123 
9124  // 1. Process free space before this allocation.
9125  if(lastOffset < suballoc.offset)
9126  {
9127  // There is free space from lastOffset to suballoc.offset.
9128  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9129  inoutStats.unusedSize += unusedRangeSize;
9130  ++inoutStats.unusedRangeCount;
9131  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9132  }
9133 
9134  // 2. Process this allocation.
9135  // There is allocation with suballoc.offset, suballoc.size.
9136  ++inoutStats.allocationCount;
9137 
9138  // 3. Prepare for next iteration.
9139  lastOffset = suballoc.offset + suballoc.size;
9140  ++nextAlloc1stIndex;
9141  }
9142  // We are at the end.
9143  else
9144  {
9145  if(lastOffset < freeSpace1stTo2ndEnd)
9146  {
9147  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9148  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9149  inoutStats.unusedSize += unusedRangeSize;
9150  ++inoutStats.unusedRangeCount;
9151  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9152  }
9153 
9154  // End of loop.
9155  lastOffset = freeSpace1stTo2ndEnd;
9156  }
9157  }
9158 
9159  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9160  {
9161  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9162  while(lastOffset < size)
9163  {
9164  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9165  while(nextAlloc2ndIndex != SIZE_MAX &&
9166  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9167  {
9168  --nextAlloc2ndIndex;
9169  }
9170 
9171  // Found non-null allocation.
9172  if(nextAlloc2ndIndex != SIZE_MAX)
9173  {
9174  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9175 
9176  // 1. Process free space before this allocation.
9177  if(lastOffset < suballoc.offset)
9178  {
9179  // There is free space from lastOffset to suballoc.offset.
9180  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9181  inoutStats.unusedSize += unusedRangeSize;
9182  ++inoutStats.unusedRangeCount;
9183  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9184  }
9185 
9186  // 2. Process this allocation.
9187  // There is allocation with suballoc.offset, suballoc.size.
9188  ++inoutStats.allocationCount;
9189 
9190  // 3. Prepare for next iteration.
9191  lastOffset = suballoc.offset + suballoc.size;
9192  --nextAlloc2ndIndex;
9193  }
9194  // We are at the end.
9195  else
9196  {
9197  if(lastOffset < size)
9198  {
9199  // There is free space from lastOffset to size.
9200  const VkDeviceSize unusedRangeSize = size - lastOffset;
9201  inoutStats.unusedSize += unusedRangeSize;
9202  ++inoutStats.unusedRangeCount;
9203  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
9204  }
9205 
9206  // End of loop.
9207  lastOffset = size;
9208  }
9209  }
9210  }
9211 }
9212 
9213 #if VMA_STATS_STRING_ENABLED
9214 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
9215 {
9216  const VkDeviceSize size = GetSize();
9217  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9218  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9219  const size_t suballoc1stCount = suballocations1st.size();
9220  const size_t suballoc2ndCount = suballocations2nd.size();
9221 
9222  // FIRST PASS
9223 
9224  size_t unusedRangeCount = 0;
9225  VkDeviceSize usedBytes = 0;
9226 
9227  VkDeviceSize lastOffset = 0;
9228 
9229  size_t alloc2ndCount = 0;
9230  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9231  {
9232  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9233  size_t nextAlloc2ndIndex = 0;
9234  while(lastOffset < freeSpace2ndTo1stEnd)
9235  {
9236  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9237  while(nextAlloc2ndIndex < suballoc2ndCount &&
9238  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9239  {
9240  ++nextAlloc2ndIndex;
9241  }
9242 
9243  // Found non-null allocation.
9244  if(nextAlloc2ndIndex < suballoc2ndCount)
9245  {
9246  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9247 
9248  // 1. Process free space before this allocation.
9249  if(lastOffset < suballoc.offset)
9250  {
9251  // There is free space from lastOffset to suballoc.offset.
9252  ++unusedRangeCount;
9253  }
9254 
9255  // 2. Process this allocation.
9256  // There is allocation with suballoc.offset, suballoc.size.
9257  ++alloc2ndCount;
9258  usedBytes += suballoc.size;
9259 
9260  // 3. Prepare for next iteration.
9261  lastOffset = suballoc.offset + suballoc.size;
9262  ++nextAlloc2ndIndex;
9263  }
9264  // We are at the end.
9265  else
9266  {
9267  if(lastOffset < freeSpace2ndTo1stEnd)
9268  {
9269  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9270  ++unusedRangeCount;
9271  }
9272 
9273  // End of loop.
9274  lastOffset = freeSpace2ndTo1stEnd;
9275  }
9276  }
9277  }
9278 
9279  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
9280  size_t alloc1stCount = 0;
9281  const VkDeviceSize freeSpace1stTo2ndEnd =
9282  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
9283  while(lastOffset < freeSpace1stTo2ndEnd)
9284  {
9285  // Find next non-null allocation or move nextAllocIndex to the end.
9286  while(nextAlloc1stIndex < suballoc1stCount &&
9287  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9288  {
9289  ++nextAlloc1stIndex;
9290  }
9291 
9292  // Found non-null allocation.
9293  if(nextAlloc1stIndex < suballoc1stCount)
9294  {
9295  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9296 
9297  // 1. Process free space before this allocation.
9298  if(lastOffset < suballoc.offset)
9299  {
9300  // There is free space from lastOffset to suballoc.offset.
9301  ++unusedRangeCount;
9302  }
9303 
9304  // 2. Process this allocation.
9305  // There is allocation with suballoc.offset, suballoc.size.
9306  ++alloc1stCount;
9307  usedBytes += suballoc.size;
9308 
9309  // 3. Prepare for next iteration.
9310  lastOffset = suballoc.offset + suballoc.size;
9311  ++nextAlloc1stIndex;
9312  }
9313  // We are at the end.
9314  else
9315  {
9316  if(lastOffset < size)
9317  {
9318  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9319  ++unusedRangeCount;
9320  }
9321 
9322  // End of loop.
9323  lastOffset = freeSpace1stTo2ndEnd;
9324  }
9325  }
9326 
9327  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9328  {
9329  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9330  while(lastOffset < size)
9331  {
9332  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9333  while(nextAlloc2ndIndex != SIZE_MAX &&
9334  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9335  {
9336  --nextAlloc2ndIndex;
9337  }
9338 
9339  // Found non-null allocation.
9340  if(nextAlloc2ndIndex != SIZE_MAX)
9341  {
9342  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9343 
9344  // 1. Process free space before this allocation.
9345  if(lastOffset < suballoc.offset)
9346  {
9347  // There is free space from lastOffset to suballoc.offset.
9348  ++unusedRangeCount;
9349  }
9350 
9351  // 2. Process this allocation.
9352  // There is allocation with suballoc.offset, suballoc.size.
9353  ++alloc2ndCount;
9354  usedBytes += suballoc.size;
9355 
9356  // 3. Prepare for next iteration.
9357  lastOffset = suballoc.offset + suballoc.size;
9358  --nextAlloc2ndIndex;
9359  }
9360  // We are at the end.
9361  else
9362  {
9363  if(lastOffset < size)
9364  {
9365  // There is free space from lastOffset to size.
9366  ++unusedRangeCount;
9367  }
9368 
9369  // End of loop.
9370  lastOffset = size;
9371  }
9372  }
9373  }
9374 
9375  const VkDeviceSize unusedBytes = size - usedBytes;
9376  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
9377 
9378  // SECOND PASS
9379  lastOffset = 0;
9380 
9381  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
9382  {
9383  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
9384  size_t nextAlloc2ndIndex = 0;
9385  while(lastOffset < freeSpace2ndTo1stEnd)
9386  {
9387  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9388  while(nextAlloc2ndIndex < suballoc2ndCount &&
9389  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9390  {
9391  ++nextAlloc2ndIndex;
9392  }
9393 
9394  // Found non-null allocation.
9395  if(nextAlloc2ndIndex < suballoc2ndCount)
9396  {
9397  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9398 
9399  // 1. Process free space before this allocation.
9400  if(lastOffset < suballoc.offset)
9401  {
9402  // There is free space from lastOffset to suballoc.offset.
9403  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9404  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9405  }
9406 
9407  // 2. Process this allocation.
9408  // There is allocation with suballoc.offset, suballoc.size.
9409  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9410 
9411  // 3. Prepare for next iteration.
9412  lastOffset = suballoc.offset + suballoc.size;
9413  ++nextAlloc2ndIndex;
9414  }
9415  // We are at the end.
9416  else
9417  {
9418  if(lastOffset < freeSpace2ndTo1stEnd)
9419  {
9420  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
9421  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
9422  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9423  }
9424 
9425  // End of loop.
9426  lastOffset = freeSpace2ndTo1stEnd;
9427  }
9428  }
9429  }
9430 
9431  nextAlloc1stIndex = m_1stNullItemsBeginCount;
9432  while(lastOffset < freeSpace1stTo2ndEnd)
9433  {
9434  // Find next non-null allocation or move nextAllocIndex to the end.
9435  while(nextAlloc1stIndex < suballoc1stCount &&
9436  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
9437  {
9438  ++nextAlloc1stIndex;
9439  }
9440 
9441  // Found non-null allocation.
9442  if(nextAlloc1stIndex < suballoc1stCount)
9443  {
9444  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
9445 
9446  // 1. Process free space before this allocation.
9447  if(lastOffset < suballoc.offset)
9448  {
9449  // There is free space from lastOffset to suballoc.offset.
9450  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9451  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9452  }
9453 
9454  // 2. Process this allocation.
9455  // There is allocation with suballoc.offset, suballoc.size.
9456  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9457 
9458  // 3. Prepare for next iteration.
9459  lastOffset = suballoc.offset + suballoc.size;
9460  ++nextAlloc1stIndex;
9461  }
9462  // We are at the end.
9463  else
9464  {
9465  if(lastOffset < freeSpace1stTo2ndEnd)
9466  {
9467  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
9468  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
9469  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9470  }
9471 
9472  // End of loop.
9473  lastOffset = freeSpace1stTo2ndEnd;
9474  }
9475  }
9476 
9477  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
9478  {
9479  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
9480  while(lastOffset < size)
9481  {
9482  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
9483  while(nextAlloc2ndIndex != SIZE_MAX &&
9484  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
9485  {
9486  --nextAlloc2ndIndex;
9487  }
9488 
9489  // Found non-null allocation.
9490  if(nextAlloc2ndIndex != SIZE_MAX)
9491  {
9492  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
9493 
9494  // 1. Process free space before this allocation.
9495  if(lastOffset < suballoc.offset)
9496  {
9497  // There is free space from lastOffset to suballoc.offset.
9498  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
9499  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9500  }
9501 
9502  // 2. Process this allocation.
9503  // There is allocation with suballoc.offset, suballoc.size.
9504  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9505 
9506  // 3. Prepare for next iteration.
9507  lastOffset = suballoc.offset + suballoc.size;
9508  --nextAlloc2ndIndex;
9509  }
9510  // We are at the end.
9511  else
9512  {
9513  if(lastOffset < size)
9514  {
9515  // There is free space from lastOffset to size.
9516  const VkDeviceSize unusedRangeSize = size - lastOffset;
9517  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
9518  }
9519 
9520  // End of loop.
9521  lastOffset = size;
9522  }
9523  }
9524  }
9525 
9526  PrintDetailedMap_End(json);
9527 }
9528 #endif // #if VMA_STATS_STRING_ENABLED
9529 
/*
Tries to find room for an allocation of allocSize bytes with allocAlignment
inside this linear block, without modifying any metadata. On success fills
*pAllocationRequest (offset, sumFreeSize, sumItemSize, itemsToMakeLostCount)
and returns true; otherwise returns false.

Three placement strategies, chosen by upperAddress and m_2ndVectorMode:
- upperAddress: allocate downward from the block end / below 2nd.back()
  (double stack). Incompatible with ring-buffer mode.
- !upperAddress, 2nd vector empty or double stack: allocate at the end of the
  1st vector, bounded by 2nd.back().offset (double stack) or block size.
- !upperAddress, 2nd vector empty or ring buffer: wrap around to offset 0,
  growing the 2nd vector toward the beginning of the 1st; when
  canMakeOtherLost is set, colliding 1st-vector allocations may be counted as
  candidates to make lost (they are not made lost here — see
  MakeRequestedAllocationsLost).

NOTE(review): the 'strategy' parameter is never read in this implementation —
the linear algorithm has only one placement policy per mode.
*/
bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VkDeviceSize bufferImageGranularity,
    VkDeviceSize allocSize,
    VkDeviceSize allocAlignment,
    bool upperAddress,
    VmaSuballocationType allocType,
    bool canMakeOtherLost,
    uint32_t strategy,
    VmaAllocationRequest* pAllocationRequest)
{
    VMA_ASSERT(allocSize > 0);
    VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    VMA_ASSERT(pAllocationRequest != VMA_NULL);
    VMA_HEAVY_ASSERT(Validate());

    const VkDeviceSize size = GetSize();
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(upperAddress)
    {
        // Upper-address allocations turn the 2nd vector into a "double stack";
        // that is mutually exclusive with using it as a ring buffer.
        if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
            return false;
        }

        // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
        if(allocSize > size)
        {
            return false;
        }
        VkDeviceSize resultBaseOffset = size - allocSize;
        if(!suballocations2nd.empty())
        {
            const VmaSuballocation& lastSuballoc = suballocations2nd.back();
            resultBaseOffset = lastSuballoc.offset - allocSize;
            // Not enough room below the current top of the upper stack.
            if(allocSize > lastSuballoc.offset)
            {
                return false;
            }
        }

        // Start from offset equal to end of free space.
        VkDeviceSize resultOffset = resultBaseOffset;

        // Apply VMA_DEBUG_MARGIN at the end.
        // (For upper-address allocations the margin is above the allocation,
        // so it is subtracted, and alignment is applied downward.)
        if(VMA_DEBUG_MARGIN > 0)
        {
            if(resultOffset < VMA_DEBUG_MARGIN)
            {
                return false;
            }
            resultOffset -= VMA_DEBUG_MARGIN;
        }

        // Apply alignment.
        resultOffset = VmaAlignDown(resultOffset, allocAlignment);

        // Check next suballocations from 2nd for BufferImageGranularity conflicts.
        // Make bigger alignment if necessary.
        if(bufferImageGranularity > 1 && !suballocations2nd.empty())
        {
            bool bufferImageGranularityConflict = false;
            // Iterate 2nd vector from back (lowest offsets, nearest this
            // allocation) toward front; stop at the first suballocation that
            // no longer shares a page with the candidate placement.
            for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
            {
                const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                {
                    if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
                    {
                        bufferImageGranularityConflict = true;
                        break;
                    }
                }
                else
                    // Already on previous page.
                    break;
            }
            if(bufferImageGranularityConflict)
            {
                resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
            }
        }

        // There is enough free space.
        const VkDeviceSize endOf1st = !suballocations1st.empty() ?
            suballocations1st.back().offset + suballocations1st.back().size :
            0;
        if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
        {
            // Check previous suballocations for BufferImageGranularity conflicts.
            // If conflict exists, allocation cannot be made here.
            if(bufferImageGranularity > 1)
            {
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
                        {
                            return false;
                        }
                    }
                    else
                    {
                        // Already on next page.
                        break;
                    }
                }
            }

            // All tests passed: Success.
            pAllocationRequest->offset = resultOffset;
            // Free space between the end of the 1st vector and the end of this
            // allocation (which sits at the current bottom of the 2nd stack).
            pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
            pAllocationRequest->sumItemSize = 0;
            // pAllocationRequest->item unused.
            pAllocationRequest->itemsToMakeLostCount = 0;
            return true;
        }
    }
    else // !upperAddress
    {
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
        {
            // Try to allocate at the end of 1st vector.

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations1st.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations1st.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations1st.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            // In double-stack mode the 1st vector may only grow up to the top
            // of the upper stack (2nd.back()); otherwise up to the block size.
            const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
                suballocations2nd.back().offset : size;

            // There is enough free space at the end after alignment.
            if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
                {
                    for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
                    {
                        const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on previous page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
                pAllocationRequest->sumItemSize = 0;
                // pAllocationRequest->item unused.
                pAllocationRequest->itemsToMakeLostCount = 0;
                return true;
            }
        }

        // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
        // beginning of 1st vector as the end of free space.
        if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
        {
            VMA_ASSERT(!suballocations1st.empty());

            VkDeviceSize resultBaseOffset = 0;
            if(!suballocations2nd.empty())
            {
                const VmaSuballocation& lastSuballoc = suballocations2nd.back();
                resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
            }

            // Start from offset equal to beginning of free space.
            VkDeviceSize resultOffset = resultBaseOffset;

            // Apply VMA_DEBUG_MARGIN at the beginning.
            if(VMA_DEBUG_MARGIN > 0)
            {
                resultOffset += VMA_DEBUG_MARGIN;
            }

            // Apply alignment.
            resultOffset = VmaAlignUp(resultOffset, allocAlignment);

            // Check previous suballocations for BufferImageGranularity conflicts.
            // Make bigger alignment if necessary.
            if(bufferImageGranularity > 1 && !suballocations2nd.empty())
            {
                bool bufferImageGranularityConflict = false;
                for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
                {
                    const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
                    if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
                    {
                        if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
                        {
                            bufferImageGranularityConflict = true;
                            break;
                        }
                    }
                    else
                        // Already on previous page.
                        break;
                }
                if(bufferImageGranularityConflict)
                {
                    resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
                }
            }

            pAllocationRequest->itemsToMakeLostCount = 0;
            pAllocationRequest->sumItemSize = 0;
            size_t index1st = m_1stNullItemsBeginCount;

            if(canMakeOtherLost)
            {
                // Count 1st-vector allocations that collide with the candidate
                // range as lost-candidates; fail if any of them cannot be lost.
                while(index1st < suballocations1st.size() &&
                    resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
                {
                    // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
                    const VmaSuballocation& suballoc = suballocations1st[index1st];
                    if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
                    {
                        // No problem.
                    }
                    else
                    {
                        VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
                        if(suballoc.hAllocation->CanBecomeLost() &&
                            suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                        {
                            ++pAllocationRequest->itemsToMakeLostCount;
                            pAllocationRequest->sumItemSize += suballoc.size;
                        }
                        else
                        {
                            return false;
                        }
                    }
                    ++index1st;
                }

                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, we must mark more allocations lost or fail.
                if(bufferImageGranularity > 1)
                {
                    while(index1st < suballocations1st.size())
                    {
                        const VmaSuballocation& suballoc = suballocations1st[index1st];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
                        {
                            if(suballoc.hAllocation != VK_NULL_HANDLE)
                            {
                                // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
                                if(suballoc.hAllocation->CanBecomeLost() &&
                                    suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
                                {
                                    ++pAllocationRequest->itemsToMakeLostCount;
                                    pAllocationRequest->sumItemSize += suballoc.size;
                                }
                                else
                                {
                                    return false;
                                }
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                        ++index1st;
                    }
                }
            }

            // There is enough free space at the end after alignment.
            // NOTE(review): the first comparison uses '<' against size while
            // the second uses '<=' against the next 1st-vector offset —
            // presumably intentional (a strict bound when wrapping fills the
            // whole remaining block), but worth confirming.
            if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN < size) ||
                (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
            {
                // Check next suballocations for BufferImageGranularity conflicts.
                // If conflict exists, allocation cannot be made here.
                if(bufferImageGranularity > 1)
                {
                    for(size_t nextSuballocIndex = index1st;
                        nextSuballocIndex < suballocations1st.size();
                        nextSuballocIndex++)
                    {
                        const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
                        if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
                        {
                            if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
                            {
                                return false;
                            }
                        }
                        else
                        {
                            // Already on next page.
                            break;
                        }
                    }
                }

                // All tests passed: Success.
                pAllocationRequest->offset = resultOffset;
                // Free space up to the first surviving 1st-vector allocation
                // (or block end), minus the sizes of items to be made lost.
                pAllocationRequest->sumFreeSize =
                    (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
                    - resultBaseOffset
                    - pAllocationRequest->sumItemSize;
                // pAllocationRequest->item unused.
                return true;
            }
        }
    }

    // No strategy produced a valid placement.
    return false;
}
9902 
bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    uint32_t currentFrameIndex,
    uint32_t frameInUseCount,
    VmaAllocationRequest* pAllocationRequest)
{
    // Makes lost exactly the allocations that CreateAllocationRequest counted in
    // pAllocationRequest->itemsToMakeLostCount. Returns false if any of them
    // cannot be made lost (metadata may then already be partially modified).
    if(pAllocationRequest->itemsToMakeLostCount == 0)
    {
        return true;
    }

    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);

    // Colliding allocations are at the beginning of the 1st vector (after the
    // leading null items) - walk forward until enough items have been freed.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    size_t index1st = m_1stNullItemsBeginCount;
    size_t madeLostCount = 0;
    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    {
        VMA_ASSERT(index1st < suballocations1st.size());
        VmaSuballocation& suballoc = suballocations1st[index1st];
        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
        {
            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
            {
                // Turn the suballocation into a null item; bookkeeping mirrors FreeAtOffset().
                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
                suballoc.hAllocation = VK_NULL_HANDLE;
                m_SumFreeSize += suballoc.size;
                ++m_1stNullItemsMiddleCount;
                ++madeLostCount;
            }
            else
            {
                return false;
            }
        }
        ++index1st;
    }

    CleanupAfterFree();
    //VMA_HEAVY_ASSERT(Validate()); // Already called by ClanupAfterFree().

    return true;
}
9947 
9948 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9949 {
9950  uint32_t lostAllocationCount = 0;
9951 
9952  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9953  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9954  {
9955  VmaSuballocation& suballoc = suballocations1st[i];
9956  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9957  suballoc.hAllocation->CanBecomeLost() &&
9958  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9959  {
9960  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9961  suballoc.hAllocation = VK_NULL_HANDLE;
9962  ++m_1stNullItemsMiddleCount;
9963  m_SumFreeSize += suballoc.size;
9964  ++lostAllocationCount;
9965  }
9966  }
9967 
9968  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9969  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
9970  {
9971  VmaSuballocation& suballoc = suballocations2nd[i];
9972  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
9973  suballoc.hAllocation->CanBecomeLost() &&
9974  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9975  {
9976  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9977  suballoc.hAllocation = VK_NULL_HANDLE;
9978  ++m_2ndNullItemsCount;
9979  ++lostAllocationCount;
9980  }
9981  }
9982 
9983  if(lostAllocationCount)
9984  {
9985  CleanupAfterFree();
9986  }
9987 
9988  return lostAllocationCount;
9989 }
9990 
9991 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
9992 {
9993  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9994  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
9995  {
9996  const VmaSuballocation& suballoc = suballocations1st[i];
9997  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9998  {
9999  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10000  {
10001  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10002  return VK_ERROR_VALIDATION_FAILED_EXT;
10003  }
10004  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10005  {
10006  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10007  return VK_ERROR_VALIDATION_FAILED_EXT;
10008  }
10009  }
10010  }
10011 
10012  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10013  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
10014  {
10015  const VmaSuballocation& suballoc = suballocations2nd[i];
10016  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
10017  {
10018  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
10019  {
10020  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
10021  return VK_ERROR_VALIDATION_FAILED_EXT;
10022  }
10023  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
10024  {
10025  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
10026  return VK_ERROR_VALIDATION_FAILED_EXT;
10027  }
10028  }
10029  }
10030 
10031  return VK_SUCCESS;
10032 }
10033 
void VmaBlockMetadata_Linear::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits an allocation previously prepared by CreateAllocationRequest.
    // request.offset decides which of the placement cases below applies.
    const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };

    if(upperAddress)
    {
        // Upper address = growing down from the end of the block: 2nd vector in double-stack mode.
        VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
            "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
        SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
        suballocations2nd.push_back(newSuballoc);
        m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    }
    else
    {
        SuballocationVectorType& suballocations1st = AccessSuballocations1st();

        // First allocation.
        if(suballocations1st.empty())
        {
            suballocations1st.push_back(newSuballoc);
        }
        else
        {
            // New allocation at the end of 1st vector.
            if(request.offset >= suballocations1st.back().offset + suballocations1st.back().size)
            {
                // Check if it fits before the end of the block.
                VMA_ASSERT(request.offset + allocSize <= GetSize());
                suballocations1st.push_back(newSuballoc);
            }
            // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
            else if(request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset)
            {
                SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

                switch(m_2ndVectorMode)
                {
                case SECOND_VECTOR_EMPTY:
                    // First allocation from second part ring buffer.
                    VMA_ASSERT(suballocations2nd.empty());
                    m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
                    break;
                case SECOND_VECTOR_RING_BUFFER:
                    // 2-part ring buffer is already started.
                    VMA_ASSERT(!suballocations2nd.empty());
                    break;
                case SECOND_VECTOR_DOUBLE_STACK:
                    VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
                    break;
                default:
                    VMA_ASSERT(0);
                }

                suballocations2nd.push_back(newSuballoc);
            }
            else
            {
                // Offset matches neither placement case - request inconsistent with current state.
                VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
            }
        }
    }

    m_SumFreeSize -= newSuballoc.size;
}
10103 
10104 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
10105 {
10106  FreeAtOffset(allocation->GetOffset());
10107 }
10108 
void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
{
    // Frees the suballocation starting at the given offset. Cases are tried from
    // cheapest to most expensive: front of 1st vector, back of 2nd/1st vector,
    // then binary search through the middle of each vector.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(!suballocations1st.empty())
    {
        // First allocation: Mark it as next empty at the beginning.
        VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
        if(firstSuballoc.offset == offset)
        {
            firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
            firstSuballoc.hAllocation = VK_NULL_HANDLE;
            m_SumFreeSize += firstSuballoc.size;
            ++m_1stNullItemsBeginCount;
            CleanupAfterFree();
            return;
        }
    }

    // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    {
        VmaSuballocation& lastSuballoc = suballocations2nd.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations2nd.pop_back();
            CleanupAfterFree();
            return;
        }
    }
    // Last allocation in 1st vector.
    else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    {
        VmaSuballocation& lastSuballoc = suballocations1st.back();
        if(lastSuballoc.offset == offset)
        {
            m_SumFreeSize += lastSuballoc.size;
            suballocations1st.pop_back();
            CleanupAfterFree();
            return;
        }
    }

    // Item from the middle of 1st vector.
    {
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // The used part of the 1st vector is ordered by ascending offset, hence the Less comparator.
        SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
            suballocations1st.begin() + m_1stNullItemsBeginCount,
            suballocations1st.end(),
            refSuballoc);
        if(it != suballocations1st.end())
        {
            // Turn it into a null item; actual removal happens in CleanupAfterFree().
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_1stNullItemsMiddleCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    {
        // Item from the middle of 2nd vector.
        VmaSuballocation refSuballoc;
        refSuballoc.offset = offset;
        // Rest of members stays uninitialized intentionally for better performance.
        // Comparator depends on the 2nd vector's ordering: Less for ring-buffer
        // mode, Greater for double-stack mode (offsets grow downward there).
        SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
            VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
            VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
        if(it != suballocations2nd.end())
        {
            it->type = VMA_SUBALLOCATION_TYPE_FREE;
            it->hAllocation = VK_NULL_HANDLE;
            ++m_2ndNullItemsCount;
            m_SumFreeSize += it->size;
            CleanupAfterFree();
            return;
        }
    }

    VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
}
10197 
10198 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
10199 {
10200  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
10201  const size_t suballocCount = AccessSuballocations1st().size();
10202  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
10203 }
10204 
void VmaBlockMetadata_Linear::CleanupAfterFree()
{
    // Restores the invariants of the two suballocation vectors after items were
    // freed (turned into null items): strips null items from the edges, compacts
    // the 1st vector when it is mostly null, and swaps the vectors when the 1st
    // one drains in ring-buffer mode.
    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();

    if(IsEmpty())
    {
        // No allocations left at all - reset everything to pristine state.
        suballocations1st.clear();
        suballocations2nd.clear();
        m_1stNullItemsBeginCount = 0;
        m_1stNullItemsMiddleCount = 0;
        m_2ndNullItemsCount = 0;
        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    }
    else
    {
        const size_t suballoc1stCount = suballocations1st.size();
        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);

        // Find more null items at the beginning of 1st vector.
        while(m_1stNullItemsBeginCount < suballoc1stCount &&
            suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
        {
            // A middle null item became a leading one.
            ++m_1stNullItemsBeginCount;
            --m_1stNullItemsMiddleCount;
        }

        // Find more null items at the end of 1st vector.
        while(m_1stNullItemsMiddleCount > 0 &&
            suballocations1st.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_1stNullItemsMiddleCount;
            suballocations1st.pop_back();
        }

        // Find more null items at the end of 2nd vector.
        while(m_2ndNullItemsCount > 0 &&
            suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
        {
            --m_2ndNullItemsCount;
            suballocations2nd.pop_back();
        }

        if(ShouldCompact1st())
        {
            // Compact 1st vector in place: shift all non-null items to the front
            // preserving their order, then drop the tail.
            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
            size_t srcIndex = m_1stNullItemsBeginCount;
            for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
            {
                while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
                {
                    ++srcIndex;
                }
                if(dstIndex != srcIndex)
                {
                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
                }
                ++srcIndex;
            }
            suballocations1st.resize(nonNullItemCount);
            m_1stNullItemsBeginCount = 0;
            m_1stNullItemsMiddleCount = 0;
        }

        // 2nd vector became empty.
        if(suballocations2nd.empty())
        {
            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
        }

        // 1st vector became empty.
        if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
        {
            suballocations1st.clear();
            m_1stNullItemsBeginCount = 0;

            if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
            {
                // Swap 1st with 2nd. Now 2nd is empty.
                m_2ndVectorMode = SECOND_VECTOR_EMPTY;
                m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
                // Leading null items of the former 2nd vector become leading
                // null items of the new 1st vector.
                while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
                    suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
                {
                    ++m_1stNullItemsBeginCount;
                    --m_1stNullItemsMiddleCount;
                }
                m_2ndNullItemsCount = 0;
                // Flipping the index makes AccessSuballocations1st() return the other vector.
                m_1stVectorIndex ^= 1;
            }
        }
    }

    VMA_HEAVY_ASSERT(Validate());
}
10301 
10302 
10304 // class VmaBlockMetadata_Buddy
10305 
// Constructs an empty buddy metadata object. m_FreeCount starts at 1 because
// after Init() the whole usable block is a single free root node;
// m_SumFreeSize stays 0 until Init() sets it to the usable size.
VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    VmaBlockMetadata(hAllocator),
    m_Root(VMA_NULL),
    m_AllocationCount(0),
    m_FreeCount(1),
    m_SumFreeSize(0)
{
    // All per-level free lists start empty (front/back pointers zeroed).
    memset(m_FreeList, 0, sizeof(m_FreeList));
}
10315 
VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
{
    // Recursively destroys the whole node tree.
    // NOTE(review): m_Root is VMA_NULL if Init() was never called; DeleteNode
    // would then dereference null - confirm Init() always precedes destruction.
    DeleteNode(m_Root);
}
10320 
10321 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
10322 {
10323  VmaBlockMetadata::Init(size);
10324 
10325  m_UsableSize = VmaPrevPow2(size);
10326  m_SumFreeSize = m_UsableSize;
10327 
10328  // Calculate m_LevelCount.
10329  m_LevelCount = 1;
10330  while(m_LevelCount < MAX_LEVELS &&
10331  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
10332  {
10333  ++m_LevelCount;
10334  }
10335 
10336  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
10337  rootNode->offset = 0;
10338  rootNode->type = Node::TYPE_FREE;
10339  rootNode->parent = VMA_NULL;
10340  rootNode->buddy = VMA_NULL;
10341 
10342  m_Root = rootNode;
10343  AddToFreeListFront(0, rootNode);
10344 }
10345 
bool VmaBlockMetadata_Buddy::Validate() const
{
    // Validate tree.
    ValidationContext ctx;
    if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    {
        VMA_VALIDATE(false && "ValidateNode failed.");
    }
    // Counters accumulated during the tree walk must match the cached members.
    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);

    // Validate free node lists.
    for(uint32_t level = 0; level < m_LevelCount; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
            m_FreeList[level].front->free.prev == VMA_NULL);

        // Every listed node must be free and the doubly-linked list consistent.
        for(Node* node = m_FreeList[level].front;
            node != VMA_NULL;
            node = node->free.next)
        {
            VMA_VALIDATE(node->type == Node::TYPE_FREE);

            if(node->free.next == VMA_NULL)
            {
                VMA_VALIDATE(m_FreeList[level].back == node);
            }
            else
            {
                VMA_VALIDATE(node->free.next->free.prev == node);
            }
        }
    }

    // Validate that free lists at higher levels are empty.
    for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    {
        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    }

    return true;
}
10388 
10389 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
10390 {
10391  for(uint32_t level = 0; level < m_LevelCount; ++level)
10392  {
10393  if(m_FreeList[level].front != VMA_NULL)
10394  {
10395  return LevelToNodeSize(level);
10396  }
10397  }
10398  return 0;
10399 }
10400 
10401 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10402 {
10403  const VkDeviceSize unusableSize = GetUnusableSize();
10404 
10405  outInfo.blockCount = 1;
10406 
10407  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
10408  outInfo.usedBytes = outInfo.unusedBytes = 0;
10409 
10410  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
10411  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
10412  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
10413 
10414  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
10415 
10416  if(unusableSize > 0)
10417  {
10418  ++outInfo.unusedRangeCount;
10419  outInfo.unusedBytes += unusableSize;
10420  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
10421  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
10422  }
10423 }
10424 
10425 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
10426 {
10427  const VkDeviceSize unusableSize = GetUnusableSize();
10428 
10429  inoutStats.size += GetSize();
10430  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
10431  inoutStats.allocationCount += m_AllocationCount;
10432  inoutStats.unusedRangeCount += m_FreeCount;
10433  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
10434 
10435  if(unusableSize > 0)
10436  {
10437  ++inoutStats.unusedRangeCount;
10438  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
10439  }
10440 }
10441 
10442 #if VMA_STATS_STRING_ENABLED
10443 
10444 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
10445 {
10446  // TODO optimize
10447  VmaStatInfo stat;
10448  CalcAllocationStatInfo(stat);
10449 
10450  PrintDetailedMap_Begin(
10451  json,
10452  stat.unusedBytes,
10453  stat.allocationCount,
10454  stat.unusedRangeCount);
10455 
10456  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
10457 
10458  const VkDeviceSize unusableSize = GetUnusableSize();
10459  if(unusableSize > 0)
10460  {
10461  PrintDetailedMap_UnusedRange(json,
10462  m_UsableSize, // offset
10463  unusableSize); // size
10464  }
10465 
10466  PrintDetailedMap_End(json);
10467 }
10468 
10469 #endif // #if VMA_STATS_STRING_ENABLED
10470 
10471 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
10472  uint32_t currentFrameIndex,
10473  uint32_t frameInUseCount,
10474  VkDeviceSize bufferImageGranularity,
10475  VkDeviceSize allocSize,
10476  VkDeviceSize allocAlignment,
10477  bool upperAddress,
10478  VmaSuballocationType allocType,
10479  bool canMakeOtherLost,
10480  uint32_t strategy,
10481  VmaAllocationRequest* pAllocationRequest)
10482 {
10483  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
10484 
10485  // Simple way to respect bufferImageGranularity. May be optimized some day.
10486  // Whenever it might be an OPTIMAL image...
10487  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
10488  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
10489  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
10490  {
10491  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
10492  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
10493  }
10494 
10495  if(allocSize > m_UsableSize)
10496  {
10497  return false;
10498  }
10499 
10500  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
10501  for(uint32_t level = targetLevel + 1; level--; )
10502  {
10503  for(Node* freeNode = m_FreeList[level].front;
10504  freeNode != VMA_NULL;
10505  freeNode = freeNode->free.next)
10506  {
10507  if(freeNode->offset % allocAlignment == 0)
10508  {
10509  pAllocationRequest->offset = freeNode->offset;
10510  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
10511  pAllocationRequest->sumItemSize = 0;
10512  pAllocationRequest->itemsToMakeLostCount = 0;
10513  pAllocationRequest->customData = (void*)(uintptr_t)level;
10514  return true;
10515  }
10516  }
10517  }
10518 
10519  return false;
10520 }
10521 
10522 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
10523  uint32_t currentFrameIndex,
10524  uint32_t frameInUseCount,
10525  VmaAllocationRequest* pAllocationRequest)
10526 {
10527  /*
10528  Lost allocations are not supported in buddy allocator at the moment.
10529  Support might be added in the future.
10530  */
10531  return pAllocationRequest->itemsToMakeLostCount == 0;
10532 }
10533 
10534 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
10535 {
10536  /*
10537  Lost allocations are not supported in buddy allocator at the moment.
10538  Support might be added in the future.
10539  */
10540  return 0;
10541 }
10542 
void VmaBlockMetadata_Buddy::Alloc(
    const VmaAllocationRequest& request,
    VmaSuballocationType type,
    VkDeviceSize allocSize,
    bool upperAddress,
    VmaAllocation hAllocation)
{
    // Commits an allocation prepared by CreateAllocationRequest: finds the free
    // node chosen there (its level was stashed in request.customData), splits it
    // down to the target level, and converts the final node to an allocation.
    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;

    // Locate the chosen node in its level's free list by offset.
    Node* currNode = m_FreeList[currLevel].front;
    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    while(currNode->offset != request.offset)
    {
        currNode = currNode->free.next;
        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    }

    // Go down, splitting free nodes.
    while(currLevel < targetLevel)
    {
        // currNode is already first free node at currLevel.
        // Remove it from list of free nodes at this currLevel.
        RemoveFromFreeList(currLevel, currNode);

        const uint32_t childrenLevel = currLevel + 1;

        // Create two free sub-nodes.
        Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
        Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();

        leftChild->offset = currNode->offset;
        leftChild->type = Node::TYPE_FREE;
        leftChild->parent = currNode;
        leftChild->buddy = rightChild;

        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
        rightChild->type = Node::TYPE_FREE;
        rightChild->parent = currNode;
        rightChild->buddy = leftChild;

        // Convert current currNode to split type.
        currNode->type = Node::TYPE_SPLIT;
        currNode->split.leftChild = leftChild;

        // Add child nodes to free list. Order is important!
        // Pushing right then left leaves the left child at the list front,
        // which the next loop iteration picks up.
        AddToFreeListFront(childrenLevel, rightChild);
        AddToFreeListFront(childrenLevel, leftChild);

        ++m_FreeCount;
        //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
        ++currLevel;
        currNode = m_FreeList[currLevel].front;

        /*
        We can be sure that currNode, as left child of node previously split,
        also fullfills the alignment requirement.
        */
    }

    // Remove from free list.
    VMA_ASSERT(currLevel == targetLevel &&
        currNode != VMA_NULL &&
        currNode->type == Node::TYPE_FREE);
    RemoveFromFreeList(currLevel, currNode);

    // Convert to allocation node.
    currNode->type = Node::TYPE_ALLOCATION;
    currNode->allocation.alloc = hAllocation;

    ++m_AllocationCount;
    --m_FreeCount;
    m_SumFreeSize -= allocSize;
}
10617 
10618 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
10619 {
10620  if(node->type == Node::TYPE_SPLIT)
10621  {
10622  DeleteNode(node->split.leftChild->buddy);
10623  DeleteNode(node->split.leftChild);
10624  }
10625 
10626  vma_delete(GetAllocationCallbacks(), node);
10627 }
10628 
10629 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
10630 {
10631  VMA_VALIDATE(level < m_LevelCount);
10632  VMA_VALIDATE(curr->parent == parent);
10633  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
10634  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
10635  switch(curr->type)
10636  {
10637  case Node::TYPE_FREE:
10638  // curr->free.prev, next are validated separately.
10639  ctx.calculatedSumFreeSize += levelNodeSize;
10640  ++ctx.calculatedFreeCount;
10641  break;
10642  case Node::TYPE_ALLOCATION:
10643  ++ctx.calculatedAllocationCount;
10644  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
10645  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
10646  break;
10647  case Node::TYPE_SPLIT:
10648  {
10649  const uint32_t childrenLevel = level + 1;
10650  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
10651  const Node* const leftChild = curr->split.leftChild;
10652  VMA_VALIDATE(leftChild != VMA_NULL);
10653  VMA_VALIDATE(leftChild->offset == curr->offset);
10654  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
10655  {
10656  VMA_VALIDATE(false && "ValidateNode for left child failed.");
10657  }
10658  const Node* const rightChild = leftChild->buddy;
10659  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
10660  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
10661  {
10662  VMA_VALIDATE(false && "ValidateNode for right child failed.");
10663  }
10664  }
10665  break;
10666  default:
10667  return false;
10668  }
10669 
10670  return true;
10671 }
10672 
10673 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
10674 {
10675  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
10676  uint32_t level = 0;
10677  VkDeviceSize currLevelNodeSize = m_UsableSize;
10678  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
10679  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
10680  {
10681  ++level;
10682  currLevelNodeSize = nextLevelNodeSize;
10683  nextLevelNodeSize = currLevelNodeSize >> 1;
10684  }
10685  return level;
10686 }
10687 
void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
{
    // Find node and level.
    Node* node = m_Root;
    VkDeviceSize nodeOffset = 0;
    uint32_t level = 0;
    VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    // Descend from the root, choosing the left or right child by comparing the
    // offset against the midpoint of the current node's range.
    while(node->type == Node::TYPE_SPLIT)
    {
        const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
        if(offset < nodeOffset + nextLevelSize)
        {
            node = node->split.leftChild;
        }
        else
        {
            node = node->split.leftChild->buddy;
            nodeOffset += nextLevelSize;
        }
        ++level;
        levelNodeSize = nextLevelSize;
    }

    VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);

    ++m_FreeCount;
    --m_AllocationCount;
    // NOTE(review): the assert above permits alloc == VK_NULL_HANDLE, but this
    // dereferences it unconditionally - confirm callers never pass a null handle.
    m_SumFreeSize += alloc->GetSize();

    node->type = Node::TYPE_FREE;

    // Join free nodes if possible.
    while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    {
        // Merge node with its free buddy into the parent, one level up.
        RemoveFromFreeList(level, node->buddy);
        Node* const parent = node->parent;

        vma_delete(GetAllocationCallbacks(), node->buddy);
        vma_delete(GetAllocationCallbacks(), node);
        parent->type = Node::TYPE_FREE;

        node = parent;
        --level;
        //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
        --m_FreeCount;
    }

    AddToFreeListFront(level, node);
}
10738 
10739 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
10740 {
10741  switch(node->type)
10742  {
10743  case Node::TYPE_FREE:
10744  ++outInfo.unusedRangeCount;
10745  outInfo.unusedBytes += levelNodeSize;
10746  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
10747  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, levelNodeSize);
10748  break;
10749  case Node::TYPE_ALLOCATION:
10750  {
10751  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10752  ++outInfo.allocationCount;
10753  outInfo.usedBytes += allocSize;
10754  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
10755  outInfo.allocationSizeMin = VMA_MAX(outInfo.allocationSizeMin, allocSize);
10756 
10757  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
10758  if(unusedRangeSize > 0)
10759  {
10760  ++outInfo.unusedRangeCount;
10761  outInfo.unusedBytes += unusedRangeSize;
10762  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10763  outInfo.unusedRangeSizeMin = VMA_MAX(outInfo.unusedRangeSizeMin, unusedRangeSize);
10764  }
10765  }
10766  break;
10767  case Node::TYPE_SPLIT:
10768  {
10769  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10770  const Node* const leftChild = node->split.leftChild;
10771  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
10772  const Node* const rightChild = leftChild->buddy;
10773  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
10774  }
10775  break;
10776  default:
10777  VMA_ASSERT(0);
10778  }
10779 }
10780 
10781 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
10782 {
10783  VMA_ASSERT(node->type == Node::TYPE_FREE);
10784 
10785  // List is empty.
10786  Node* const frontNode = m_FreeList[level].front;
10787  if(frontNode == VMA_NULL)
10788  {
10789  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
10790  node->free.prev = node->free.next = VMA_NULL;
10791  m_FreeList[level].front = m_FreeList[level].back = node;
10792  }
10793  else
10794  {
10795  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
10796  node->free.prev = VMA_NULL;
10797  node->free.next = frontNode;
10798  frontNode->free.prev = node;
10799  m_FreeList[level].front = node;
10800  }
10801 }
10802 
10803 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
10804 {
10805  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
10806 
10807  // It is at the front.
10808  if(node->free.prev == VMA_NULL)
10809  {
10810  VMA_ASSERT(m_FreeList[level].front == node);
10811  m_FreeList[level].front = node->free.next;
10812  }
10813  else
10814  {
10815  Node* const prevFreeNode = node->free.prev;
10816  VMA_ASSERT(prevFreeNode->free.next == node);
10817  prevFreeNode->free.next = node->free.next;
10818  }
10819 
10820  // It is at the back.
10821  if(node->free.next == VMA_NULL)
10822  {
10823  VMA_ASSERT(m_FreeList[level].back == node);
10824  m_FreeList[level].back = node->free.prev;
10825  }
10826  else
10827  {
10828  Node* const nextFreeNode = node->free.next;
10829  VMA_ASSERT(nextFreeNode->free.prev == node);
10830  nextFreeNode->free.prev = node->free.prev;
10831  }
10832 }
10833 
10834 #if VMA_STATS_STRING_ENABLED
10835 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
10836 {
10837  switch(node->type)
10838  {
10839  case Node::TYPE_FREE:
10840  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
10841  break;
10842  case Node::TYPE_ALLOCATION:
10843  {
10844  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
10845  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
10846  if(allocSize < levelNodeSize)
10847  {
10848  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
10849  }
10850  }
10851  break;
10852  case Node::TYPE_SPLIT:
10853  {
10854  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
10855  const Node* const leftChild = node->split.leftChild;
10856  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
10857  const Node* const rightChild = leftChild->buddy;
10858  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
10859  }
10860  break;
10861  default:
10862  VMA_ASSERT(0);
10863  }
10864 }
10865 #endif // #if VMA_STATS_STRING_ENABLED
10866 
10867 
10869 // class VmaDeviceMemoryBlock
10870 
// Constructs the block in an uninitialized state; the actual VkDeviceMemory,
// memory type and metadata object are attached later in Init().
VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    m_pMetadata(VMA_NULL),
    m_MemoryTypeIndex(UINT32_MAX),
    m_Id(0),
    m_hMemory(VK_NULL_HANDLE),
    m_MapCount(0),
    m_pMappedData(VMA_NULL)
{
}
10880 
10881 void VmaDeviceMemoryBlock::Init(
10882  VmaAllocator hAllocator,
10883  uint32_t newMemoryTypeIndex,
10884  VkDeviceMemory newMemory,
10885  VkDeviceSize newSize,
10886  uint32_t id,
10887  uint32_t algorithm)
10888 {
10889  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
10890 
10891  m_MemoryTypeIndex = newMemoryTypeIndex;
10892  m_Id = id;
10893  m_hMemory = newMemory;
10894 
10895  switch(algorithm)
10896  {
10898  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
10899  break;
10901  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
10902  break;
10903  default:
10904  VMA_ASSERT(0);
10905  // Fall-through.
10906  case 0:
10907  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
10908  }
10909  m_pMetadata->Init(newSize);
10910 }
10911 
// Releases the VkDeviceMemory back to the driver and destroys the metadata.
// The block must be completely empty - every suballocation already freed.
void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
{
    // This is the most important assert in the entire library.
    // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");

    VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    // Return the device memory, then drop the handle so a second Destroy()
    // would trip the assert above instead of double-freeing.
    allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    m_hMemory = VK_NULL_HANDLE;

    // Destroy the metadata object created in Init().
    vma_delete(allocator, m_pMetadata);
    m_pMetadata = VMA_NULL;
}
10925 
10926 bool VmaDeviceMemoryBlock::Validate() const
10927 {
10928  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
10929  (m_pMetadata->GetSize() != 0));
10930 
10931  return m_pMetadata->Validate();
10932 }
10933 
10934 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
10935 {
10936  void* pData = nullptr;
10937  VkResult res = Map(hAllocator, 1, &pData);
10938  if(res != VK_SUCCESS)
10939  {
10940  return res;
10941  }
10942 
10943  res = m_pMetadata->CheckCorruption(pData);
10944 
10945  Unmap(hAllocator, 1);
10946 
10947  return res;
10948 }
10949 
10950 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
10951 {
10952  if(count == 0)
10953  {
10954  return VK_SUCCESS;
10955  }
10956 
10957  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10958  if(m_MapCount != 0)
10959  {
10960  m_MapCount += count;
10961  VMA_ASSERT(m_pMappedData != VMA_NULL);
10962  if(ppData != VMA_NULL)
10963  {
10964  *ppData = m_pMappedData;
10965  }
10966  return VK_SUCCESS;
10967  }
10968  else
10969  {
10970  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
10971  hAllocator->m_hDevice,
10972  m_hMemory,
10973  0, // offset
10974  VK_WHOLE_SIZE,
10975  0, // flags
10976  &m_pMappedData);
10977  if(result == VK_SUCCESS)
10978  {
10979  if(ppData != VMA_NULL)
10980  {
10981  *ppData = m_pMappedData;
10982  }
10983  m_MapCount = count;
10984  }
10985  return result;
10986  }
10987 }
10988 
10989 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
10990 {
10991  if(count == 0)
10992  {
10993  return;
10994  }
10995 
10996  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
10997  if(m_MapCount >= count)
10998  {
10999  m_MapCount -= count;
11000  if(m_MapCount == 0)
11001  {
11002  m_pMappedData = VMA_NULL;
11003  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
11004  }
11005  }
11006  else
11007  {
11008  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
11009  }
11010 }
11011 
11012 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11013 {
11014  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11015  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11016 
11017  void* pData;
11018  VkResult res = Map(hAllocator, 1, &pData);
11019  if(res != VK_SUCCESS)
11020  {
11021  return res;
11022  }
11023 
11024  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
11025  VmaWriteMagicValue(pData, allocOffset + allocSize);
11026 
11027  Unmap(hAllocator, 1);
11028 
11029  return VK_SUCCESS;
11030 }
11031 
11032 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
11033 {
11034  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
11035  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
11036 
11037  void* pData;
11038  VkResult res = Map(hAllocator, 1, &pData);
11039  if(res != VK_SUCCESS)
11040  {
11041  return res;
11042  }
11043 
11044  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
11045  {
11046  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
11047  }
11048  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
11049  {
11050  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
11051  }
11052 
11053  Unmap(hAllocator, 1);
11054 
11055  return VK_SUCCESS;
11056 }
11057 
// Binds hBuffer to this block's VkDeviceMemory at the allocation's offset.
// Returns the result of vkBindBufferMemory.
VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkBuffer hBuffer)
{
    // The allocation must live inside this very block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
        hAllocator->m_hDevice,
        hBuffer,
        m_hMemory,
        hAllocation->GetOffset());
}
11073 
// Binds hImage to this block's VkDeviceMemory at the allocation's offset.
// Returns the result of vkBindImageMemory.
VkResult VmaDeviceMemoryBlock::BindImageMemory(
    const VmaAllocator hAllocator,
    const VmaAllocation hAllocation,
    VkImage hImage)
{
    // The allocation must live inside this very block.
    VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
        hAllocation->GetBlock() == this);
    // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    return hAllocator->GetVulkanFunctions().vkBindImageMemory(
        hAllocator->m_hDevice,
        hImage,
        m_hMemory,
        hAllocation->GetOffset());
}
11089 
// Resets outInfo to a neutral state for accumulation: everything zeroed except
// the minimums, which start at UINT64_MAX so the first VMA_MIN in
// VmaAddStatInfo picks up a real value.
static void InitStatInfo(VmaStatInfo& outInfo)
{
    memset(&outInfo, 0, sizeof(outInfo));
    outInfo.allocationSizeMin = UINT64_MAX;
    outInfo.unusedRangeSizeMin = UINT64_MAX;
}
11096 
// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
// Counters and byte totals are additive; size extremes combine via min/max.
// Averages are NOT maintained here - see VmaPostprocessCalcStatInfo.
static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
{
    inoutInfo.blockCount += srcInfo.blockCount;
    inoutInfo.allocationCount += srcInfo.allocationCount;
    inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    inoutInfo.usedBytes += srcInfo.usedBytes;
    inoutInfo.unusedBytes += srcInfo.unusedBytes;
    inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
}
11110 
11111 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
11112 {
11113  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
11114  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
11115  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
11116  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
11117 }
11118 
// Constructs a custom pool: configures the internal block vector from
// createInfo, falling back to preferredBlockSize when the user did not give
// an explicit blockSize.
VmaPool_T::VmaPool_T(
    VmaAllocator hAllocator,
    const VmaPoolCreateInfo& createInfo,
    VkDeviceSize preferredBlockSize) :
    m_BlockVector(
        hAllocator,
        createInfo.memoryTypeIndex,
        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
        createInfo.minBlockCount,
        createInfo.maxBlockCount,
        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
        createInfo.frameInUseCount,
        true, // isCustomPool
        createInfo.blockSize != 0, // explicitBlockSize
        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    m_Id(0)
{
}
11137 
// Trivial destructor - m_BlockVector destroys its blocks in its own destructor.
VmaPool_T::~VmaPool_T()
{
}
11141 
11142 #if VMA_STATS_STRING_ENABLED
11143 
11144 #endif // #if VMA_STATS_STRING_ENABLED
11145 
// Constructs an (initially empty) vector of memory blocks for one memory type.
// Blocks themselves are created lazily in Allocate()/CreateMinBlocks().
VmaBlockVector::VmaBlockVector(
    VmaAllocator hAllocator,
    uint32_t memoryTypeIndex,
    VkDeviceSize preferredBlockSize,
    size_t minBlockCount,
    size_t maxBlockCount,
    VkDeviceSize bufferImageGranularity,
    uint32_t frameInUseCount,
    bool isCustomPool,
    bool explicitBlockSize,
    uint32_t algorithm) :
    m_hAllocator(hAllocator),
    m_MemoryTypeIndex(memoryTypeIndex),
    m_PreferredBlockSize(preferredBlockSize),
    m_MinBlockCount(minBlockCount),
    m_MaxBlockCount(maxBlockCount),
    m_BufferImageGranularity(bufferImageGranularity),
    m_FrameInUseCount(frameInUseCount),
    m_IsCustomPool(isCustomPool),
    m_ExplicitBlockSize(explicitBlockSize),
    m_Algorithm(algorithm),
    m_HasEmptyBlock(false),
    m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    m_NextBlockId(0)
{
}
11172 
11173 VmaBlockVector::~VmaBlockVector()
11174 {
11175  for(size_t i = m_Blocks.size(); i--; )
11176  {
11177  m_Blocks[i]->Destroy(m_hAllocator);
11178  vma_delete(m_hAllocator, m_Blocks[i]);
11179  }
11180 }
11181 
11182 VkResult VmaBlockVector::CreateMinBlocks()
11183 {
11184  for(size_t i = 0; i < m_MinBlockCount; ++i)
11185  {
11186  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
11187  if(res != VK_SUCCESS)
11188  {
11189  return res;
11190  }
11191  }
11192  return VK_SUCCESS;
11193 }
11194 
11195 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
11196 {
11197  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
11198 
11199  const size_t blockCount = m_Blocks.size();
11200 
11201  pStats->size = 0;
11202  pStats->unusedSize = 0;
11203  pStats->allocationCount = 0;
11204  pStats->unusedRangeCount = 0;
11205  pStats->unusedRangeSizeMax = 0;
11206  pStats->blockCount = blockCount;
11207 
11208  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
11209  {
11210  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
11211  VMA_ASSERT(pBlock);
11212  VMA_HEAVY_ASSERT(pBlock->Validate());
11213  pBlock->m_pMetadata->AddPoolStats(*pStats);
11214  }
11215 }
11216 
11217 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
11218 {
11219  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
11220  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
11221  (VMA_DEBUG_MARGIN > 0) &&
11222  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
11223 }
11224 
// Maximum number of retries in VmaBlockVector::Allocate when allocating with
// canMakeOtherLost, before giving up with VK_ERROR_TOO_MANY_OBJECTS.
static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
11226 
11227 VkResult VmaBlockVector::Allocate(
11228  VmaPool hCurrentPool,
11229  uint32_t currentFrameIndex,
11230  VkDeviceSize size,
11231  VkDeviceSize alignment,
11232  const VmaAllocationCreateInfo& createInfo,
11233  VmaSuballocationType suballocType,
11234  VmaAllocation* pAllocation)
11235 {
11236  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11237  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
11238  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11239  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11240  const bool canCreateNewBlock =
11241  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
11242  (m_Blocks.size() < m_MaxBlockCount);
11243  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
11244 
11245  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
11246  // Which in turn is available only when maxBlockCount = 1.
11247  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
11248  {
11249  canMakeOtherLost = false;
11250  }
11251 
11252  // Upper address can only be used with linear allocator and within single memory block.
11253  if(isUpperAddress &&
11254  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
11255  {
11256  return VK_ERROR_FEATURE_NOT_PRESENT;
11257  }
11258 
11259  // Validate strategy.
11260  switch(strategy)
11261  {
11262  case 0:
11264  break;
11268  break;
11269  default:
11270  return VK_ERROR_FEATURE_NOT_PRESENT;
11271  }
11272 
11273  // Early reject: requested allocation size is larger that maximum block size for this block vector.
11274  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
11275  {
11276  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11277  }
11278 
11279  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11280 
11281  /*
11282  Under certain condition, this whole section can be skipped for optimization, so
11283  we move on directly to trying to allocate with canMakeOtherLost. That's the case
11284  e.g. for custom pools with linear algorithm.
11285  */
11286  if(!canMakeOtherLost || canCreateNewBlock)
11287  {
11288  // 1. Search existing allocations. Try to allocate without making other allocations lost.
11289  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
11291 
11292  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11293  {
11294  // Use only last block.
11295  if(!m_Blocks.empty())
11296  {
11297  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
11298  VMA_ASSERT(pCurrBlock);
11299  VkResult res = AllocateFromBlock(
11300  pCurrBlock,
11301  hCurrentPool,
11302  currentFrameIndex,
11303  size,
11304  alignment,
11305  allocFlagsCopy,
11306  createInfo.pUserData,
11307  suballocType,
11308  strategy,
11309  pAllocation);
11310  if(res == VK_SUCCESS)
11311  {
11312  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
11313  return VK_SUCCESS;
11314  }
11315  }
11316  }
11317  else
11318  {
11320  {
11321  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11322  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11323  {
11324  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11325  VMA_ASSERT(pCurrBlock);
11326  VkResult res = AllocateFromBlock(
11327  pCurrBlock,
11328  hCurrentPool,
11329  currentFrameIndex,
11330  size,
11331  alignment,
11332  allocFlagsCopy,
11333  createInfo.pUserData,
11334  suballocType,
11335  strategy,
11336  pAllocation);
11337  if(res == VK_SUCCESS)
11338  {
11339  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11340  return VK_SUCCESS;
11341  }
11342  }
11343  }
11344  else // WORST_FIT, FIRST_FIT
11345  {
11346  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11347  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11348  {
11349  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11350  VMA_ASSERT(pCurrBlock);
11351  VkResult res = AllocateFromBlock(
11352  pCurrBlock,
11353  hCurrentPool,
11354  currentFrameIndex,
11355  size,
11356  alignment,
11357  allocFlagsCopy,
11358  createInfo.pUserData,
11359  suballocType,
11360  strategy,
11361  pAllocation);
11362  if(res == VK_SUCCESS)
11363  {
11364  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
11365  return VK_SUCCESS;
11366  }
11367  }
11368  }
11369  }
11370 
11371  // 2. Try to create new block.
11372  if(canCreateNewBlock)
11373  {
11374  // Calculate optimal size for new block.
11375  VkDeviceSize newBlockSize = m_PreferredBlockSize;
11376  uint32_t newBlockSizeShift = 0;
11377  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
11378 
11379  if(!m_ExplicitBlockSize)
11380  {
11381  // Allocate 1/8, 1/4, 1/2 as first blocks.
11382  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
11383  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
11384  {
11385  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11386  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
11387  {
11388  newBlockSize = smallerNewBlockSize;
11389  ++newBlockSizeShift;
11390  }
11391  else
11392  {
11393  break;
11394  }
11395  }
11396  }
11397 
11398  size_t newBlockIndex = 0;
11399  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
11400  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
11401  if(!m_ExplicitBlockSize)
11402  {
11403  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
11404  {
11405  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
11406  if(smallerNewBlockSize >= size)
11407  {
11408  newBlockSize = smallerNewBlockSize;
11409  ++newBlockSizeShift;
11410  res = CreateBlock(newBlockSize, &newBlockIndex);
11411  }
11412  else
11413  {
11414  break;
11415  }
11416  }
11417  }
11418 
11419  if(res == VK_SUCCESS)
11420  {
11421  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
11422  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
11423 
11424  res = AllocateFromBlock(
11425  pBlock,
11426  hCurrentPool,
11427  currentFrameIndex,
11428  size,
11429  alignment,
11430  allocFlagsCopy,
11431  createInfo.pUserData,
11432  suballocType,
11433  strategy,
11434  pAllocation);
11435  if(res == VK_SUCCESS)
11436  {
11437  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
11438  return VK_SUCCESS;
11439  }
11440  else
11441  {
11442  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
11443  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11444  }
11445  }
11446  }
11447  }
11448 
11449  // 3. Try to allocate from existing blocks with making other allocations lost.
11450  if(canMakeOtherLost)
11451  {
11452  uint32_t tryIndex = 0;
11453  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
11454  {
11455  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
11456  VmaAllocationRequest bestRequest = {};
11457  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
11458 
11459  // 1. Search existing allocations.
11461  {
11462  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
11463  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
11464  {
11465  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11466  VMA_ASSERT(pCurrBlock);
11467  VmaAllocationRequest currRequest = {};
11468  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11469  currentFrameIndex,
11470  m_FrameInUseCount,
11471  m_BufferImageGranularity,
11472  size,
11473  alignment,
11474  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11475  suballocType,
11476  canMakeOtherLost,
11477  strategy,
11478  &currRequest))
11479  {
11480  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11481  if(pBestRequestBlock == VMA_NULL ||
11482  currRequestCost < bestRequestCost)
11483  {
11484  pBestRequestBlock = pCurrBlock;
11485  bestRequest = currRequest;
11486  bestRequestCost = currRequestCost;
11487 
11488  if(bestRequestCost == 0)
11489  {
11490  break;
11491  }
11492  }
11493  }
11494  }
11495  }
11496  else // WORST_FIT, FIRST_FIT
11497  {
11498  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
11499  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
11500  {
11501  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
11502  VMA_ASSERT(pCurrBlock);
11503  VmaAllocationRequest currRequest = {};
11504  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
11505  currentFrameIndex,
11506  m_FrameInUseCount,
11507  m_BufferImageGranularity,
11508  size,
11509  alignment,
11510  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
11511  suballocType,
11512  canMakeOtherLost,
11513  strategy,
11514  &currRequest))
11515  {
11516  const VkDeviceSize currRequestCost = currRequest.CalcCost();
11517  if(pBestRequestBlock == VMA_NULL ||
11518  currRequestCost < bestRequestCost ||
11520  {
11521  pBestRequestBlock = pCurrBlock;
11522  bestRequest = currRequest;
11523  bestRequestCost = currRequestCost;
11524 
11525  if(bestRequestCost == 0 ||
11527  {
11528  break;
11529  }
11530  }
11531  }
11532  }
11533  }
11534 
11535  if(pBestRequestBlock != VMA_NULL)
11536  {
11537  if(mapped)
11538  {
11539  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
11540  if(res != VK_SUCCESS)
11541  {
11542  return res;
11543  }
11544  }
11545 
11546  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
11547  currentFrameIndex,
11548  m_FrameInUseCount,
11549  &bestRequest))
11550  {
11551  // We no longer have an empty Allocation.
11552  if(pBestRequestBlock->m_pMetadata->IsEmpty())
11553  {
11554  m_HasEmptyBlock = false;
11555  }
11556  // Allocate from this pBlock.
11557  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11558  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, isUpperAddress, *pAllocation);
11559  (*pAllocation)->InitBlockAllocation(
11560  hCurrentPool,
11561  pBestRequestBlock,
11562  bestRequest.offset,
11563  alignment,
11564  size,
11565  suballocType,
11566  mapped,
11567  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11568  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
11569  VMA_DEBUG_LOG(" Returned from existing allocation #%u", (uint32_t)blockIndex);
11570  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
11571  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11572  {
11573  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11574  }
11575  if(IsCorruptionDetectionEnabled())
11576  {
11577  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
11578  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11579  }
11580  return VK_SUCCESS;
11581  }
11582  // else: Some allocations must have been touched while we are here. Next try.
11583  }
11584  else
11585  {
11586  // Could not find place in any of the blocks - break outer loop.
11587  break;
11588  }
11589  }
11590  /* Maximum number of tries exceeded - a very unlike event when many other
11591  threads are simultaneously touching allocations making it impossible to make
11592  lost at the same time as we try to allocate. */
11593  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
11594  {
11595  return VK_ERROR_TOO_MANY_OBJECTS;
11596  }
11597  }
11598 
11599  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11600 }
11601 
11602 void VmaBlockVector::Free(
11603  VmaAllocation hAllocation)
11604 {
11605  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
11606 
11607  // Scope for lock.
11608  {
11609  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
11610 
11611  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
11612 
11613  if(IsCorruptionDetectionEnabled())
11614  {
11615  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
11616  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
11617  }
11618 
11619  if(hAllocation->IsPersistentMap())
11620  {
11621  pBlock->Unmap(m_hAllocator, 1);
11622  }
11623 
11624  pBlock->m_pMetadata->Free(hAllocation);
11625  VMA_HEAVY_ASSERT(pBlock->Validate());
11626 
11627  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", memTypeIndex);
11628 
11629  // pBlock became empty after this deallocation.
11630  if(pBlock->m_pMetadata->IsEmpty())
11631  {
11632  // Already has empty Allocation. We don't want to have two, so delete this one.
11633  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
11634  {
11635  pBlockToDelete = pBlock;
11636  Remove(pBlock);
11637  }
11638  // We now have first empty block.
11639  else
11640  {
11641  m_HasEmptyBlock = true;
11642  }
11643  }
11644  // pBlock didn't become empty, but we have another empty block - find and free that one.
11645  // (This is optional, heuristics.)
11646  else if(m_HasEmptyBlock)
11647  {
11648  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
11649  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
11650  {
11651  pBlockToDelete = pLastBlock;
11652  m_Blocks.pop_back();
11653  m_HasEmptyBlock = false;
11654  }
11655  }
11656 
11657  IncrementallySortBlocks();
11658  }
11659 
11660  // Destruction of a free Allocation. Deferred until this point, outside of mutex
11661  // lock, for performance reason.
11662  if(pBlockToDelete != VMA_NULL)
11663  {
11664  VMA_DEBUG_LOG(" Deleted empty allocation");
11665  pBlockToDelete->Destroy(m_hAllocator);
11666  vma_delete(m_hAllocator, pBlockToDelete);
11667  }
11668 }
11669 
11670 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
11671 {
11672  VkDeviceSize result = 0;
11673  for(size_t i = m_Blocks.size(); i--; )
11674  {
11675  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
11676  if(result >= m_PreferredBlockSize)
11677  {
11678  break;
11679  }
11680  }
11681  return result;
11682 }
11683 
11684 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
11685 {
11686  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
11687  {
11688  if(m_Blocks[blockIndex] == pBlock)
11689  {
11690  VmaVectorRemove(m_Blocks, blockIndex);
11691  return;
11692  }
11693  }
11694  VMA_ASSERT(0);
11695 }
11696 
11697 void VmaBlockVector::IncrementallySortBlocks()
11698 {
11699  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
11700  {
11701  // Bubble sort only until first swap.
11702  for(size_t i = 1; i < m_Blocks.size(); ++i)
11703  {
11704  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
11705  {
11706  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
11707  return;
11708  }
11709  }
11710  }
11711 }
11712 
11713 VkResult VmaBlockVector::AllocateFromBlock(
11714  VmaDeviceMemoryBlock* pBlock,
11715  VmaPool hCurrentPool,
11716  uint32_t currentFrameIndex,
11717  VkDeviceSize size,
11718  VkDeviceSize alignment,
11719  VmaAllocationCreateFlags allocFlags,
11720  void* pUserData,
11721  VmaSuballocationType suballocType,
11722  uint32_t strategy,
11723  VmaAllocation* pAllocation)
11724 {
11725  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
11726  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
11727  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
11728  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
11729 
11730  VmaAllocationRequest currRequest = {};
11731  if(pBlock->m_pMetadata->CreateAllocationRequest(
11732  currentFrameIndex,
11733  m_FrameInUseCount,
11734  m_BufferImageGranularity,
11735  size,
11736  alignment,
11737  isUpperAddress,
11738  suballocType,
11739  false, // canMakeOtherLost
11740  strategy,
11741  &currRequest))
11742  {
11743  // Allocate from pCurrBlock.
11744  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
11745 
11746  if(mapped)
11747  {
11748  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
11749  if(res != VK_SUCCESS)
11750  {
11751  return res;
11752  }
11753  }
11754 
11755  // We no longer have an empty Allocation.
11756  if(pBlock->m_pMetadata->IsEmpty())
11757  {
11758  m_HasEmptyBlock = false;
11759  }
11760 
11761  *pAllocation = vma_new(m_hAllocator, VmaAllocation_T)(currentFrameIndex, isUserDataString);
11762  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, isUpperAddress, *pAllocation);
11763  (*pAllocation)->InitBlockAllocation(
11764  hCurrentPool,
11765  pBlock,
11766  currRequest.offset,
11767  alignment,
11768  size,
11769  suballocType,
11770  mapped,
11771  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
11772  VMA_HEAVY_ASSERT(pBlock->Validate());
11773  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
11774  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
11775  {
11776  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
11777  }
11778  if(IsCorruptionDetectionEnabled())
11779  {
11780  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
11781  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
11782  }
11783  return VK_SUCCESS;
11784  }
11785  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
11786 }
11787 
11788 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
11789 {
11790  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
11791  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
11792  allocInfo.allocationSize = blockSize;
11793  VkDeviceMemory mem = VK_NULL_HANDLE;
11794  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
11795  if(res < 0)
11796  {
11797  return res;
11798  }
11799 
11800  // New VkDeviceMemory successfully created.
11801 
11802  // Create new Allocation for it.
11803  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
11804  pBlock->Init(
11805  m_hAllocator,
11806  m_MemoryTypeIndex,
11807  mem,
11808  allocInfo.allocationSize,
11809  m_NextBlockId++,
11810  m_Algorithm);
11811 
11812  m_Blocks.push_back(pBlock);
11813  if(pNewBlockIndex != VMA_NULL)
11814  {
11815  *pNewBlockIndex = m_Blocks.size() - 1;
11816  }
11817 
11818  return VK_SUCCESS;
11819 }
11820 
// Performs the data copies for a set of defragmentation moves on the CPU,
// using memmove on mapped block memory.
// Sequence: map every block that participates in a move (if not already
// mapped), copy the bytes for each move, then unmap the blocks that were
// mapped only for this operation.
// For non-coherent memory types, the source range is invalidated before the
// copy and the destination range flushed after it, with offset/size aligned
// to nonCoherentAtomSize.
// On failure, pDefragCtx->res holds the first error (e.g. from Map()).
void VmaBlockVector::ApplyDefragmentationMovesCpu(
    class VmaBlockVectorDefragmentationContext* pDefragCtx,
    const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
{
    const size_t blockCount = m_Blocks.size();
    const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);

    enum BLOCK_FLAG
    {
        BLOCK_FLAG_USED = 0x00000001, // Block is source or destination of some move.
        BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002, // Block was mapped here and must be unmapped at the end.
    };

    // Per-block bookkeeping: participation flags + mapped pointer.
    struct BlockInfo
    {
        uint32_t flags;
        void* pMappedData;
    };
    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
        blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));

    // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    const size_t moveCount = moves.size();
    for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    {
        const VmaDefragmentationMove& move = moves[moveIndex];
        blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
        blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    }

    VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);

    // Go over all blocks. Get mapped pointer or map if necessary.
    for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo& currBlockInfo = blockInfo[blockIndex];
        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
        {
            currBlockInfo.pMappedData = pBlock->GetMappedData();
            // It is not originally mapped - map it.
            if(currBlockInfo.pMappedData == VMA_NULL)
            {
                pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
                if(pDefragCtx->res == VK_SUCCESS)
                {
                    // Remember to unmap this block at the end.
                    currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
                }
            }
        }
    }

    // Go over all moves. Do actual data transfer.
    if(pDefragCtx->res == VK_SUCCESS)
    {
        const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
        VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };

        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
        {
            const VmaDefragmentationMove& move = moves[moveIndex];

            const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
            const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];

            VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);

            // Invalidate source.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
                memRange.memory = pSrcBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
                // Clamp the aligned-up size so the range doesn't run past the block end.
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
                    pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }

            // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
            // memmove, not memcpy: source and destination ranges may overlap
            // when an allocation is shifted within the same block.
            memmove(
                reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
                reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
                static_cast<size_t>(move.size));

            if(IsCorruptionDetectionEnabled())
            {
                // Re-write the guard magic values around the allocation's new location.
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
                VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
            }

            // Flush destination.
            if(isNonCoherent)
            {
                VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
                memRange.memory = pDstBlock->GetDeviceMemory();
                memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
                memRange.size = VMA_MIN(
                    VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
                    pDstBlock->m_pMetadata->GetSize() - memRange.offset);
                (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
            }
        }
    }

    // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    // Regardless of pCtx->res == VK_SUCCESS.
    for(size_t blockIndex = blockCount; blockIndex--; )
    {
        const BlockInfo& currBlockInfo = blockInfo[blockIndex];
        if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
        {
            VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
            pBlock->Unmap(m_hAllocator, 1);
        }
    }
}
11939 
11940 void VmaBlockVector::ApplyDefragmentationMovesGpu(
11941  class VmaBlockVectorDefragmentationContext* pDefragCtx,
11942  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
11943  VkCommandBuffer commandBuffer)
11944 {
11945  const size_t blockCount = m_Blocks.size();
11946 
11947  pDefragCtx->blockContexts.resize(blockCount);
11948  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
11949 
11950  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
11951  const size_t moveCount = moves.size();
11952  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11953  {
11954  const VmaDefragmentationMove& move = moves[moveIndex];
11955  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
11956  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
11957  }
11958 
11959  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
11960 
11961  // Go over all blocks. Create and bind buffer for whole block if necessary.
11962  {
11963  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
11964  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
11965  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
11966 
11967  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
11968  {
11969  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
11970  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
11971  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
11972  {
11973  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
11974  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
11975  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
11976  if(pDefragCtx->res == VK_SUCCESS)
11977  {
11978  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
11979  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
11980  }
11981  }
11982  }
11983  }
11984 
11985  // Go over all moves. Post data transfer commands to command buffer.
11986  if(pDefragCtx->res == VK_SUCCESS)
11987  {
11988  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
11989  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
11990 
11991  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
11992  {
11993  const VmaDefragmentationMove& move = moves[moveIndex];
11994 
11995  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
11996  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
11997 
11998  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
11999 
12000  VkBufferCopy region = {
12001  move.srcOffset,
12002  move.dstOffset,
12003  move.size };
12004  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
12005  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
12006  }
12007  }
12008 
12009  // Save buffers to defrag context for later destruction.
12010  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
12011  {
12012  pDefragCtx->res = VK_NOT_READY;
12013  }
12014 }
12015 
12016 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
12017 {
12018  m_HasEmptyBlock = false;
12019  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12020  {
12021  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
12022  if(pBlock->m_pMetadata->IsEmpty())
12023  {
12024  if(m_Blocks.size() > m_MinBlockCount)
12025  {
12026  if(pDefragmentationStats != VMA_NULL)
12027  {
12028  ++pDefragmentationStats->deviceMemoryBlocksFreed;
12029  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
12030  }
12031 
12032  VmaVectorRemove(m_Blocks, blockIndex);
12033  pBlock->Destroy(m_hAllocator);
12034  vma_delete(m_hAllocator, pBlock);
12035  }
12036  else
12037  {
12038  m_HasEmptyBlock = true;
12039  }
12040  }
12041  }
12042 }
12043 
#if VMA_STATS_STRING_ENABLED

// Writes this block vector's state to the JSON writer as one object.
// For a custom pool: memory type index, block size, min/max/current block
// counts, plus frame-in-use count and algorithm name when non-default.
// For a default pool: only the preferred block size.
// In both cases, a "Blocks" object follows, keyed by block ID, with each
// block's detailed metadata map as the value.
void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
{
    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    json.BeginObject();

    if(m_IsCustomPool)
    {
        json.WriteString("MemoryTypeIndex");
        json.WriteNumber(m_MemoryTypeIndex);

        json.WriteString("BlockSize");
        json.WriteNumber(m_PreferredBlockSize);

        json.WriteString("BlockCount");
        json.BeginObject(true);
        // Min/Max entries are emitted only when they constrain anything.
        if(m_MinBlockCount > 0)
        {
            json.WriteString("Min");
            json.WriteNumber((uint64_t)m_MinBlockCount);
        }
        if(m_MaxBlockCount < SIZE_MAX)
        {
            json.WriteString("Max");
            json.WriteNumber((uint64_t)m_MaxBlockCount);
        }
        json.WriteString("Cur");
        json.WriteNumber((uint64_t)m_Blocks.size());
        json.EndObject();

        if(m_FrameInUseCount > 0)
        {
            json.WriteString("FrameInUseCount");
            json.WriteNumber(m_FrameInUseCount);
        }

        if(m_Algorithm != 0)
        {
            json.WriteString("Algorithm");
            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
        }
    }
    else
    {
        json.WriteString("PreferredBlockSize");
        json.WriteNumber(m_PreferredBlockSize);
    }

    json.WriteString("Blocks");
    json.BeginObject();
    for(size_t i = 0; i < m_Blocks.size(); ++i)
    {
        // Key: block ID as a string. Value: the block's detailed map.
        json.BeginString();
        json.ContinueString(m_Blocks[i]->GetId());
        json.EndString();

        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    }
    json.EndObject();

    json.EndObject();
}

#endif // #if VMA_STATS_STRING_ENABLED
12110 
// Runs defragmentation for this block vector, choosing between the CPU path
// (memmove on mapped memory) and the GPU path (vkCmdCopyBuffer recorded into
// commandBuffer) based on the remaining budgets and memory type properties.
// The max*BytesToMove / max*AllocationsToMove parameters are in-out budgets,
// decreased by the amounts actually consumed.
// Takes m_Mutex for writing when mutexes are enabled; it is released later
// in DefragmentationEnd() (pCtx->mutexLocked records this).
void VmaBlockVector::Defragment(
    class VmaBlockVectorDefragmentationContext* pCtx,
    VmaDefragmentationStats* pStats,
    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer)
{
    pCtx->res = VK_SUCCESS;

    const VkMemoryPropertyFlags memPropFlags =
        m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;

    // CPU path requires host-visible memory and a non-exhausted CPU budget.
    const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
        isHostVisible;
    // GPU path requires a non-exhausted GPU budget; with corruption detection
    // enabled it is excluded for host-visible + host-coherent types.
    const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
        (VMA_DEBUG_DETECT_CORRUPTION == 0 || !(isHostVisible && isHostCoherent));

    // There are options to defragment this memory type.
    if(canDefragmentOnCpu || canDefragmentOnGpu)
    {
        bool defragmentOnGpu;
        // There is only one option to defragment this memory type.
        if(canDefragmentOnGpu != canDefragmentOnCpu)
        {
            defragmentOnGpu = canDefragmentOnGpu;
        }
        // Both options are available: Heuristics to choose the best one.
        else
        {
            defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
                m_hAllocator->IsIntegratedGpu();
        }

        // CPU copies use memmove so overlapping src/dst ranges are allowed;
        // GPU buffer copies are not.
        bool overlappingMoveSupported = !defragmentOnGpu;

        if(m_hAllocator->m_UseMutex)
        {
            m_Mutex.LockWrite();
            pCtx->mutexLocked = true;
        }

        pCtx->Begin(overlappingMoveSupported);

        // Defragment.

        const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
        const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
        VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
            VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
        pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);

        // Accumulate statistics.
        // NOTE(review): the budget decrements below run only when pStats is
        // provided - confirm whether budgets should also shrink without stats.
        if(pStats != VMA_NULL)
        {
            const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
            const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
            pStats->bytesMoved += bytesMoved;
            pStats->allocationsMoved += allocationsMoved;
            VMA_ASSERT(bytesMoved <= maxBytesToMove);
            VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
            // Charge the consumed amounts against whichever budget was used.
            if(defragmentOnGpu)
            {
                maxGpuBytesToMove -= bytesMoved;
                maxGpuAllocationsToMove -= allocationsMoved;
            }
            else
            {
                maxCpuBytesToMove -= bytesMoved;
                maxCpuAllocationsToMove -= allocationsMoved;
            }
        }

        if(pCtx->res >= VK_SUCCESS)
        {
            if(defragmentOnGpu)
            {
                ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
            }
            else
            {
                ApplyDefragmentationMovesCpu(pCtx, moves);
            }
        }
    }
}
12198 
12199 void VmaBlockVector::DefragmentationEnd(
12200  class VmaBlockVectorDefragmentationContext* pCtx,
12201  VmaDefragmentationStats* pStats)
12202 {
12203  // Destroy buffers.
12204  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
12205  {
12206  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
12207  if(blockCtx.hBuffer)
12208  {
12209  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
12210  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
12211  }
12212  }
12213 
12214  if(pCtx->res >= VK_SUCCESS)
12215  {
12216  FreeEmptyBlocks(pStats);
12217  }
12218 
12219  if(pCtx->mutexLocked)
12220  {
12221  VMA_ASSERT(m_hAllocator->m_UseMutex);
12222  m_Mutex.UnlockWrite();
12223  }
12224 }
12225 
12226 size_t VmaBlockVector::CalcAllocationCount() const
12227 {
12228  size_t result = 0;
12229  for(size_t i = 0; i < m_Blocks.size(); ++i)
12230  {
12231  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
12232  }
12233  return result;
12234 }
12235 
12236 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
12237 {
12238  if(m_BufferImageGranularity == 1)
12239  {
12240  return false;
12241  }
12242  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
12243  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
12244  {
12245  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
12246  VMA_ASSERT(m_Algorithm == 0);
12247  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
12248  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
12249  {
12250  return true;
12251  }
12252  }
12253  return false;
12254 }
12255 
12256 void VmaBlockVector::MakePoolAllocationsLost(
12257  uint32_t currentFrameIndex,
12258  size_t* pLostAllocationCount)
12259 {
12260  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12261  size_t lostAllocationCount = 0;
12262  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12263  {
12264  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12265  VMA_ASSERT(pBlock);
12266  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
12267  }
12268  if(pLostAllocationCount != VMA_NULL)
12269  {
12270  *pLostAllocationCount = lostAllocationCount;
12271  }
12272 }
12273 
12274 VkResult VmaBlockVector::CheckCorruption()
12275 {
12276  if(!IsCorruptionDetectionEnabled())
12277  {
12278  return VK_ERROR_FEATURE_NOT_PRESENT;
12279  }
12280 
12281  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12282  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
12283  {
12284  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12285  VMA_ASSERT(pBlock);
12286  VkResult res = pBlock->CheckCorruption(m_hAllocator);
12287  if(res != VK_SUCCESS)
12288  {
12289  return res;
12290  }
12291  }
12292  return VK_SUCCESS;
12293 }
12294 
// Accumulates the statistics of every block in this vector into pStats:
// the grand total, the entry for this memory type, and the entry for the
// heap that memory type belongs to.
void VmaBlockVector::AddStats(VmaStats* pStats)
{
    const uint32_t memTypeIndex = m_MemoryTypeIndex;
    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);

    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);

    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    {
        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
        VMA_ASSERT(pBlock);
        VMA_HEAVY_ASSERT(pBlock->Validate());
        // Compute this block's stats, then fold them into all three views.
        VmaStatInfo allocationStatInfo;
        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
        VmaAddStatInfo(pStats->total, allocationStatInfo);
        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    }
}
12314 
12316 // VmaDefragmentationAlgorithm_Generic members definition
12317 
// Constructs the generic defragmentation algorithm over the given block
// vector: creates one BlockInfo per existing block (remembering its original
// index) and sorts them by block pointer so AddAllocation() can locate a
// block via binary search.
// NOTE(review): overlappingMoveSupported is accepted but not referenced in
// this constructor - confirm it is intentionally unused by this algorithm.
VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_AllAllocations(false),
    m_AllocationCount(0),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
{
    // Create block info for each block.
    const size_t blockCount = m_pBlockVector->m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
        pBlockInfo->m_OriginalBlockIndex = blockIndex;
        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
        m_Blocks.push_back(pBlockInfo);
    }

    // Sort them by m_pBlock pointer value.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
}
12343 
12344 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
12345 {
12346  for(size_t i = m_Blocks.size(); i--; )
12347  {
12348  vma_delete(m_hAllocator, m_Blocks[i]);
12349  }
12350 }
12351 
// Registers one allocation as a candidate for defragmentation.
// Lost allocations are skipped. The allocation is appended to the
// m_Allocations list of the BlockInfo owning its block, found via binary
// search over the pointer-sorted m_Blocks. pChanged (optional) will receive
// VK_TRUE if the allocation is actually moved later.
void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
{
    // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
    if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    {
        VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
        BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
        if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
        {
            AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
            (*it)->m_Allocations.push_back(allocInfo);
        }
        else
        {
            // Every allocation's block must be known to this algorithm.
            VMA_ASSERT(0);
        }

        ++m_AllocationCount;
    }
}
12372 
// Performs one round of defragmentation: walks allocations from the most
// "source" block backwards and tries to re-place each one in an earlier
// block (or a lower offset within its own block), recording a
// VmaDefragmentationMove and updating metadata for every successful
// relocation. Stops when maxBytesToMove or maxAllocationsToMove would be
// exceeded or when all candidates are exhausted.
// Returns VK_SUCCESS on every path; limits are not an error.
VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(m_Blocks.empty())
    {
        return VK_SUCCESS;
    }

    // This is a choice based on research.
    // Option 1:
    uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    // Option 2:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    // Option 3:
    //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;

    size_t srcBlockMinIndex = 0;
    // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations.
    /*
    if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    {
        const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
        if(blocksWithNonMovableCount > 0)
        {
            srcBlockMinIndex = blocksWithNonMovableCount - 1;
        }
    }
    */

    size_t srcBlockIndex = m_Blocks.size() - 1;
    // SIZE_MAX means "not chosen yet"; the loop below snaps it to the last
    // allocation of the current block.
    size_t srcAllocIndex = SIZE_MAX;
    for(;;)
    {
        // 1. Find next allocation to move.
        // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
        // 1.2. Then start from last to first m_Allocations.
        while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
        {
            if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
            {
                // Finished: no more allocations to process.
                if(srcBlockIndex == srcBlockMinIndex)
                {
                    return VK_SUCCESS;
                }
                else
                {
                    --srcBlockIndex;
                    srcAllocIndex = SIZE_MAX;
                }
            }
            else
            {
                srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
            }
        }

        BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
        AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];

        const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
        const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
        const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
        const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();

        // 2. Try to find new place for this allocation in preceding or current block.
        for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
        {
            BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
            VmaAllocationRequest dstAllocRequest;
            if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
                m_CurrentFrameIndex,
                m_pBlockVector->GetFrameInUseCount(),
                m_pBlockVector->GetBufferImageGranularity(),
                size,
                alignment,
                false, // upperAddress
                suballocType,
                false, // canMakeOtherLost
                strategy,
                &dstAllocRequest) &&
            MoveMakesSense(
                dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
            {
                VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);

                // Reached limit on number of allocations or bytes to move.
                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
                    (m_BytesMoved + size > maxBytesToMove))
                {
                    return VK_SUCCESS;
                }

                // Record the move for the caller to execute later.
                VmaDefragmentationMove move;
                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
                move.srcOffset = srcOffset;
                move.dstOffset = dstAllocRequest.offset;
                move.size = size;
                moves.push_back(move);

                // Update metadata immediately so subsequent allocation
                // requests see the new layout.
                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
                    dstAllocRequest,
                    suballocType,
                    size,
                    false, // upperAddress
                    allocInfo.m_hAllocation);
                pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);

                allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);

                if(allocInfo.m_pChanged != VMA_NULL)
                {
                    *allocInfo.m_pChanged = VK_TRUE;
                }

                ++m_AllocationsMoved;
                m_BytesMoved += size;

                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);

                break;
            }
        }

        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.

        if(srcAllocIndex > 0)
        {
            --srcAllocIndex;
        }
        else
        {
            if(srcBlockIndex > 0)
            {
                --srcBlockIndex;
                srcAllocIndex = SIZE_MAX;
            }
            else
            {
                return VK_SUCCESS;
            }
        }
    }
}
12520 
12521 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
12522 {
12523  size_t result = 0;
12524  for(size_t i = 0; i < m_Blocks.size(); ++i)
12525  {
12526  if(m_Blocks[i]->m_HasNonMovableAllocations)
12527  {
12528  ++result;
12529  }
12530  }
12531  return result;
12532 }
12533 
// Top-level entry of the generic algorithm. When m_AllAllocations is set,
// first registers every non-free suballocation of every block. Then, per
// block, computes movability and sorts allocations by offset descending;
// sorts the blocks from most "destination" to most "source"; and finally
// executes up to two DefragmentRound() passes within the given limits.
VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    if(!m_AllAllocations && m_AllocationCount == 0)
    {
        return VK_SUCCESS;
    }

    const size_t blockCount = m_Blocks.size();
    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    {
        BlockInfo* pBlockInfo = m_Blocks[blockIndex];

        if(m_AllAllocations)
        {
            // Defragment-everything mode: register each used suballocation.
            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
                it != pMetadata->m_Suballocations.end();
                ++it)
            {
                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
                {
                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
                    pBlockInfo->m_Allocations.push_back(allocInfo);
                }
            }
        }

        pBlockInfo->CalcHasNonMovableAllocations();

        // This is a choice based on research.
        // Option 1:
        pBlockInfo->SortAllocationsByOffsetDescending();
        // Option 2:
        //pBlockInfo->SortAllocationsBySizeDescending();
    }

    // Sort m_Blocks this time by the main criterium, from most "destination" to most "source" blocks.
    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());

    // This is a choice based on research.
    const uint32_t roundCount = 2;

    // Execute defragmentation rounds (the main part).
    VkResult result = VK_SUCCESS;
    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    {
        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    }

    return result;
}
12588 
12589 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
12590  size_t dstBlockIndex, VkDeviceSize dstOffset,
12591  size_t srcBlockIndex, VkDeviceSize srcOffset)
12592 {
12593  if(dstBlockIndex < srcBlockIndex)
12594  {
12595  return true;
12596  }
12597  if(dstBlockIndex > srcBlockIndex)
12598  {
12599  return false;
12600  }
12601  if(dstOffset < srcOffset)
12602  {
12603  return true;
12604  }
12605  return false;
12606 }
12607 
12609 // VmaDefragmentationAlgorithm_Fast
12610 
// Constructs the "fast" defragmentation algorithm over the given block
// vector. overlappingMoveSupported indicates whether a destination range may
// overlap its source within the same block (true for the CPU/memmove path).
VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    VmaAllocator hAllocator,
    VmaBlockVector* pBlockVector,
    uint32_t currentFrameIndex,
    bool overlappingMoveSupported) :
    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    m_OverlappingMoveSupported(overlappingMoveSupported),
    m_AllocationCount(0),
    m_AllAllocations(false),
    m_BytesMoved(0),
    m_AllocationsMoved(0),
    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
{
    // This algorithm does not account for a debug margin around allocations.
    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);

}
12627 
VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
{
    // Nothing to release explicitly; members clean up via their own destructors.
}
12631 
12632 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
12633  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
12634  VkDeviceSize maxBytesToMove,
12635  uint32_t maxAllocationsToMove)
12636 {
12637  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
12638 
12639  const size_t blockCount = m_pBlockVector->GetBlockCount();
12640  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
12641  {
12642  return VK_SUCCESS;
12643  }
12644 
12645  PreprocessMetadata();
12646 
12647  // Sort blocks in order from most destination.
12648 
12649  m_BlockInfos.resize(blockCount);
12650  for(size_t i = 0; i < blockCount; ++i)
12651  {
12652  m_BlockInfos[i].origBlockIndex = i;
12653  }
12654 
12655  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
12656  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
12657  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
12658  });
12659 
12660  // THE MAIN ALGORITHM
12661 
12662  FreeSpaceDatabase freeSpaceDb;
12663 
12664  size_t dstBlockInfoIndex = 0;
12665  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12666  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12667  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12668  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
12669  VkDeviceSize dstOffset = 0;
12670 
12671  bool end = false;
12672  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
12673  {
12674  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
12675  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
12676  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
12677  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
12678  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
12679  {
12680  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
12681  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
12682  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
12683  if(m_AllocationsMoved == maxAllocationsToMove ||
12684  m_BytesMoved + srcAllocSize > maxBytesToMove)
12685  {
12686  end = true;
12687  break;
12688  }
12689  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
12690 
12691  // Try to place it in one of free spaces from the database.
12692  size_t freeSpaceInfoIndex;
12693  VkDeviceSize dstAllocOffset;
12694  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
12695  freeSpaceInfoIndex, dstAllocOffset))
12696  {
12697  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
12698  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
12699  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
12700  VkDeviceSize freeSpaceBlockSize = pFreeSpaceMetadata->GetSize();
12701 
12702  // Same block
12703  if(freeSpaceInfoIndex == srcBlockInfoIndex)
12704  {
12705  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12706 
12707  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12708 
12709  VmaSuballocation suballoc = *srcSuballocIt;
12710  suballoc.offset = dstAllocOffset;
12711  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
12712  m_BytesMoved += srcAllocSize;
12713  ++m_AllocationsMoved;
12714 
12715  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12716  ++nextSuballocIt;
12717  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12718  srcSuballocIt = nextSuballocIt;
12719 
12720  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12721 
12722  VmaDefragmentationMove move = {
12723  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12724  srcAllocOffset, dstAllocOffset,
12725  srcAllocSize };
12726  moves.push_back(move);
12727  }
12728  // Different block
12729  else
12730  {
12731  // MOVE OPTION 2: Move the allocation to a different block.
12732 
12733  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
12734 
12735  VmaSuballocation suballoc = *srcSuballocIt;
12736  suballoc.offset = dstAllocOffset;
12737  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
12738  m_BytesMoved += srcAllocSize;
12739  ++m_AllocationsMoved;
12740 
12741  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12742  ++nextSuballocIt;
12743  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12744  srcSuballocIt = nextSuballocIt;
12745 
12746  InsertSuballoc(pFreeSpaceMetadata, suballoc);
12747 
12748  VmaDefragmentationMove move = {
12749  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
12750  srcAllocOffset, dstAllocOffset,
12751  srcAllocSize };
12752  moves.push_back(move);
12753  }
12754  }
12755  else
12756  {
12757  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
12758 
12759  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
12760  while(dstBlockInfoIndex < srcBlockInfoIndex &&
12761  dstAllocOffset + srcAllocSize > dstBlockSize)
12762  {
12763  // But before that, register remaining free space at the end of dst block.
12764  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
12765 
12766  ++dstBlockInfoIndex;
12767  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
12768  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
12769  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
12770  dstBlockSize = pDstMetadata->GetSize();
12771  dstOffset = 0;
12772  dstAllocOffset = 0;
12773  }
12774 
12775  // Same block
12776  if(dstBlockInfoIndex == srcBlockInfoIndex)
12777  {
12778  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
12779 
12780  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
12781 
12782  bool skipOver = overlap;
12783  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
12784  {
12785  // If destination and source place overlap, skip if it would move it
12786  // by only < 1/64 of its size.
12787  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
12788  }
12789 
12790  if(skipOver)
12791  {
12792  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
12793 
12794  dstOffset = srcAllocOffset + srcAllocSize;
12795  ++srcSuballocIt;
12796  }
12797  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
12798  else
12799  {
12800  srcSuballocIt->offset = dstAllocOffset;
12801  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
12802  dstOffset = dstAllocOffset + srcAllocSize;
12803  m_BytesMoved += srcAllocSize;
12804  ++m_AllocationsMoved;
12805  ++srcSuballocIt;
12806  VmaDefragmentationMove move = {
12807  srcOrigBlockIndex, dstOrigBlockIndex,
12808  srcAllocOffset, dstAllocOffset,
12809  srcAllocSize };
12810  moves.push_back(move);
12811  }
12812  }
12813  // Different block
12814  else
12815  {
12816  // MOVE OPTION 2: Move the allocation to a different block.
12817 
12818  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
12819  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
12820 
12821  VmaSuballocation suballoc = *srcSuballocIt;
12822  suballoc.offset = dstAllocOffset;
12823  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
12824  dstOffset = dstAllocOffset + srcAllocSize;
12825  m_BytesMoved += srcAllocSize;
12826  ++m_AllocationsMoved;
12827 
12828  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
12829  ++nextSuballocIt;
12830  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
12831  srcSuballocIt = nextSuballocIt;
12832 
12833  pDstMetadata->m_Suballocations.push_back(suballoc);
12834 
12835  VmaDefragmentationMove move = {
12836  srcOrigBlockIndex, dstOrigBlockIndex,
12837  srcAllocOffset, dstAllocOffset,
12838  srcAllocSize };
12839  moves.push_back(move);
12840  }
12841  }
12842  }
12843  }
12844 
12845  m_BlockInfos.clear();
12846 
12847  PostprocessMetadata();
12848 
12849  return VK_SUCCESS;
12850 }
12851 
12852 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
12853 {
12854  const size_t blockCount = m_pBlockVector->GetBlockCount();
12855  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12856  {
12857  VmaBlockMetadata_Generic* const pMetadata =
12858  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12859  pMetadata->m_FreeCount = 0;
12860  pMetadata->m_SumFreeSize = pMetadata->GetSize();
12861  pMetadata->m_FreeSuballocationsBySize.clear();
12862  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12863  it != pMetadata->m_Suballocations.end(); )
12864  {
12865  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
12866  {
12867  VmaSuballocationList::iterator nextIt = it;
12868  ++nextIt;
12869  pMetadata->m_Suballocations.erase(it);
12870  it = nextIt;
12871  }
12872  else
12873  {
12874  ++it;
12875  }
12876  }
12877  }
12878 }
12879 
12880 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
12881 {
12882  const size_t blockCount = m_pBlockVector->GetBlockCount();
12883  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12884  {
12885  VmaBlockMetadata_Generic* const pMetadata =
12886  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
12887  const VkDeviceSize blockSize = pMetadata->GetSize();
12888 
12889  // No allocations in this block - entire area is free.
12890  if(pMetadata->m_Suballocations.empty())
12891  {
12892  pMetadata->m_FreeCount = 1;
12893  //pMetadata->m_SumFreeSize is already set to blockSize.
12894  VmaSuballocation suballoc = {
12895  0, // offset
12896  blockSize, // size
12897  VMA_NULL, // hAllocation
12898  VMA_SUBALLOCATION_TYPE_FREE };
12899  pMetadata->m_Suballocations.push_back(suballoc);
12900  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
12901  }
12902  // There are some allocations in this block.
12903  else
12904  {
12905  VkDeviceSize offset = 0;
12906  VmaSuballocationList::iterator it;
12907  for(it = pMetadata->m_Suballocations.begin();
12908  it != pMetadata->m_Suballocations.end();
12909  ++it)
12910  {
12911  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
12912  VMA_ASSERT(it->offset >= offset);
12913 
12914  // Need to insert preceding free space.
12915  if(it->offset > offset)
12916  {
12917  ++pMetadata->m_FreeCount;
12918  const VkDeviceSize freeSize = it->offset - offset;
12919  VmaSuballocation suballoc = {
12920  offset, // offset
12921  freeSize, // size
12922  VMA_NULL, // hAllocation
12923  VMA_SUBALLOCATION_TYPE_FREE };
12924  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
12925  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
12926  {
12927  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
12928  }
12929  }
12930 
12931  pMetadata->m_SumFreeSize -= it->size;
12932  offset = it->offset + it->size;
12933  }
12934 
12935  // Need to insert trailing free space.
12936  if(offset < blockSize)
12937  {
12938  ++pMetadata->m_FreeCount;
12939  const VkDeviceSize freeSize = blockSize - offset;
12940  VmaSuballocation suballoc = {
12941  offset, // offset
12942  freeSize, // size
12943  VMA_NULL, // hAllocation
12944  VMA_SUBALLOCATION_TYPE_FREE };
12945  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
12946  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
12947  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
12948  {
12949  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
12950  }
12951  }
12952 
12953  VMA_SORT(
12954  pMetadata->m_FreeSuballocationsBySize.begin(),
12955  pMetadata->m_FreeSuballocationsBySize.end(),
12956  VmaSuballocationItemSizeLess());
12957  }
12958 
12959  VMA_HEAVY_ASSERT(pMetadata->Validate());
12960  }
12961 }
12962 
12963 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
12964 {
12965  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
12966  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
12967  while(it != pMetadata->m_Suballocations.end())
12968  {
12969  if(it->offset < suballoc.offset)
12970  {
12971  ++it;
12972  }
12973  }
12974  pMetadata->m_Suballocations.insert(it, suballoc);
12975 }
12976 
12978 // VmaBlockVectorDefragmentationContext
12979 
// Per-block-vector defragmentation state. hCustomPool is the owning custom
// pool, or VMA_NULL for a default (per-memory-type) pool. The algorithm object
// itself is created later, in Begin(); until then m_pAlgorithm stays VMA_NULL.
// NOTE(review): initializer order must match the member declaration order in
// the class definition (not visible in this chunk) — keep it in sync.
VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    VmaAllocator hAllocator,
    VmaPool hCustomPool,
    VmaBlockVector* pBlockVector,
    uint32_t currFrameIndex,
    uint32_t algorithmFlags) :
    res(VK_SUCCESS),
    mutexLocked(false),
    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    m_hAllocator(hAllocator),
    m_hCustomPool(hCustomPool),
    m_pBlockVector(pBlockVector),
    m_CurrFrameIndex(currFrameIndex),
    m_AlgorithmFlags(algorithmFlags),
    m_pAlgorithm(VMA_NULL),
    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    m_AllAllocations(false)
{
}
12999 
VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
{
    // Destroys the algorithm object created in Begin(). vma_delete on
    // VMA_NULL is the no-op case when Begin() was never called.
    vma_delete(m_hAllocator, m_pAlgorithm);
}
13004 
13005 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13006 {
13007  AllocInfo info = { hAlloc, pChanged };
13008  m_Allocations.push_back(info);
13009 }
13010 
13011 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
13012 {
13013  const bool allAllocations = m_AllAllocations ||
13014  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
13015 
13016  /********************************
13017  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
13018  ********************************/
13019 
13020  /*
13021  Fast algorithm is supported only when certain criteria are met:
13022  - VMA_DEBUG_MARGIN is 0.
13023  - All allocations in this block vector are moveable.
13024  - There is no possibility of image/buffer granularity conflict.
13025  */
13026  if(VMA_DEBUG_MARGIN == 0 &&
13027  allAllocations &&
13028  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
13029  {
13030  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
13031  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13032  }
13033  else
13034  {
13035  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
13036  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
13037  }
13038 
13039  if(allAllocations)
13040  {
13041  m_pAlgorithm->AddAll();
13042  }
13043  else
13044  {
13045  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
13046  {
13047  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
13048  }
13049  }
13050 }
13051 
13053 // VmaDefragmentationContext
13054 
// Top-level defragmentation context: owns one VmaBlockVectorDefragmentationContext
// per participating default pool (indexed by memory type) and per custom pool.
VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    VmaAllocator hAllocator,
    uint32_t currFrameIndex,
    uint32_t flags,
    VmaDefragmentationStats* pStats) :
    m_hAllocator(hAllocator),
    m_CurrFrameIndex(currFrameIndex),
    m_Flags(flags),
    m_pStats(pStats),
    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
{
    // Default-pool contexts start out null; created lazily in AddAllocations().
    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
}
13068 
VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
{
    // Finalize and destroy per-block-vector contexts. Custom pools first,
    // iterated in reverse creation order; DefragmentationEnd() lets each
    // block vector finish bookkeeping (and update m_pStats) before deletion.
    for(size_t i = m_CustomPoolContexts.size(); i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
        vma_delete(m_hAllocator, pBlockVectorCtx);
    }
    // Then the default pools — only memory types that actually got a context.
    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
        if(pBlockVectorCtx)
        {
            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
            vma_delete(m_hAllocator, pBlockVectorCtx);
        }
    }
}
13087 
13088 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
13089 {
13090  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
13091  {
13092  VmaPool pool = pPools[poolIndex];
13093  VMA_ASSERT(pool);
13094  // Pools with algorithm other than default are not defragmented.
13095  if(pool->m_BlockVector.GetAlgorithm() == 0)
13096  {
13097  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
13098 
13099  for(size_t i = m_CustomPoolContexts.size(); i--; )
13100  {
13101  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
13102  {
13103  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
13104  break;
13105  }
13106  }
13107 
13108  if(!pBlockVectorDefragCtx)
13109  {
13110  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
13111  m_hAllocator,
13112  pool,
13113  &pool->m_BlockVector,
13114  m_CurrFrameIndex,
13115  m_Flags);
13116  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
13117  }
13118 
13119  pBlockVectorDefragCtx->AddAll();
13120  }
13121  }
13122 }
13123 
// Registers individual allocations for defragmentation, routing each one to
// the per-block-vector context of the pool it lives in (creating that context
// on first use). pAllocationsChanged, if not null, receives one VkBool32 per
// allocation, later set by the algorithm if that allocation was moved.
void VmaDefragmentationContext_T::AddAllocations(
    uint32_t allocationCount,
    VmaAllocation* pAllocations,
    VkBool32* pAllocationsChanged)
{
    // Dispatch pAllocations among defragmentators. Create them when necessary.
    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    {
        const VmaAllocation hAlloc = pAllocations[allocIndex];
        VMA_ASSERT(hAlloc);
        // DedicatedAlloc cannot be defragmented.
        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
            // Lost allocation cannot be defragmented.
            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
        {
            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;

            const VmaPool hAllocPool = hAlloc->GetPool();
            // This allocation belongs to custom pool.
            if(hAllocPool != VK_NULL_HANDLE)
            {
                // Pools with algorithm other than default are not defragmented.
                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
                {
                    // Look for an existing context for this pool (reverse scan).
                    for(size_t i = m_CustomPoolContexts.size(); i--; )
                    {
                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
                        {
                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
                            break;
                        }
                    }
                    if(!pBlockVectorDefragCtx)
                    {
                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                            m_hAllocator,
                            hAllocPool,
                            &hAllocPool->m_BlockVector,
                            m_CurrFrameIndex,
                            m_Flags);
                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
                    }
                }
            }
            // This allocation belongs to default pool.
            else
            {
                // Default pools are indexed directly by memory type.
                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
                if(!pBlockVectorDefragCtx)
                {
                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
                        m_hAllocator,
                        VMA_NULL, // hCustomPool
                        m_hAllocator->m_pBlockVectors[memTypeIndex],
                        m_CurrFrameIndex,
                        m_Flags);
                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
                }
            }

            // pBlockVectorDefragCtx stays null (allocation skipped) when the
            // custom pool uses a non-default algorithm.
            if(pBlockVectorDefragCtx)
            {
                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
                    &pAllocationsChanged[allocIndex] : VMA_NULL;
                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
            }
        }
    }
}
13194 
// Runs defragmentation over all registered block vectors, honoring separate
// CPU and GPU budgets. When no command buffer is given the GPU budgets are
// zeroed, restricting the pass to CPU-side (memcpy) moves only. Returns the
// first non-success result reported by any block vector; both loops stop
// early once res drops below VK_SUCCESS.
VkResult VmaDefragmentationContext_T::Defragment(
    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
{
    if(pStats)
    {
        memset(pStats, 0, sizeof(VmaDefragmentationStats));
    }

    if(commandBuffer == VK_NULL_HANDLE)
    {
        maxGpuBytesToMove = 0;
        maxGpuAllocationsToMove = 0;
    }

    VkResult res = VK_SUCCESS;

    // Process default pools.
    for(uint32_t memTypeIndex = 0;
        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
        ++memTypeIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
        if(pBlockVectorCtx)
        {
            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
            pBlockVectorCtx->GetBlockVector()->Defragment(
                pBlockVectorCtx,
                pStats,
                maxCpuBytesToMove, maxCpuAllocationsToMove,
                maxGpuBytesToMove, maxGpuAllocationsToMove,
                commandBuffer);
            if(pBlockVectorCtx->res != VK_SUCCESS)
            {
                // Keep the per-context result; note this also captures
                // positive non-success codes like VK_NOT_READY.
                res = pBlockVectorCtx->res;
            }
        }
    }

    // Process custom pools.
    for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
        customCtxIndex < customCtxCount && res >= VK_SUCCESS;
        ++customCtxIndex)
    {
        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
        VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
        pBlockVectorCtx->GetBlockVector()->Defragment(
            pBlockVectorCtx,
            pStats,
            maxCpuBytesToMove, maxCpuAllocationsToMove,
            maxGpuBytesToMove, maxGpuAllocationsToMove,
            commandBuffer);
        if(pBlockVectorCtx->res != VK_SUCCESS)
        {
            res = pBlockVectorCtx->res;
        }
    }

    return res;
}
13256 
13258 // VmaRecorder
13259 
13260 #if VMA_RECORDING_ENABLED
13261 
// Call recorder (Windows-only, compiled under VMA_RECORDING_ENABLED).
// Members hold inert defaults until Init() opens the output file;
// INT64_MAX presumably marks the timer values as "not yet measured".
VmaRecorder::VmaRecorder() :
    m_UseMutex(true),
    m_Flags(0),
    m_File(VMA_NULL),
    m_Freq(INT64_MAX),
    m_StartCounter(INT64_MAX)
{
}
13270 
// Opens the recording CSV file and writes its identification header.
// Uses Win32 QueryPerformanceCounter/Frequency for timestamps and the MSVC
// fopen_s — consistent with recording being enabled only on _WIN32.
// Returns VK_ERROR_INITIALIZATION_FAILED if the file cannot be opened.
VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
{
    m_UseMutex = useMutex;
    m_Flags = settings.flags;

    // Capture timer frequency and the start instant; GetBasicParams()
    // presumably derives per-call timestamps from these — confirm there.
    QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);

    // Open file for writing.
    errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    if(err != 0)
    {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Write header: file magic line, then format version "1,5".
    fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    fprintf(m_File, "%s\n", "1,5");

    return VK_SUCCESS;
}
13292 
13293 VmaRecorder::~VmaRecorder()
13294 {
13295  if(m_File != VMA_NULL)
13296  {
13297  fclose(m_File);
13298  }
13299 }
13300 
13301 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
13302 {
13303  CallParams callParams;
13304  GetBasicParams(callParams);
13305 
13306  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13307  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
13308  Flush();
13309 }
13310 
13311 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
13312 {
13313  CallParams callParams;
13314  GetBasicParams(callParams);
13315 
13316  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13317  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
13318  Flush();
13319 }
13320 
// Logs a vmaCreatePool() call: pool parameters followed by the pool handle.
// The positional argument order defines the recording file format — keep it.
void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
        createInfo.memoryTypeIndex,
        createInfo.flags,
        createInfo.blockSize,
        (uint64_t)createInfo.minBlockCount,
        (uint64_t)createInfo.maxBlockCount,
        createInfo.frameInUseCount,
        pool);
    Flush();
}
13337 
13338 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
13339 {
13340  CallParams callParams;
13341  GetBasicParams(callParams);
13342 
13343  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13344  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
13345  pool);
13346  Flush();
13347 }
13348 
// Logs a vmaAllocateMemory() call: memory requirements, allocation-create
// parameters, the resulting handle and (optionally escaped) user data string.
// Positional argument order defines the file format — keep it.
void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13373 
// Logs a vmaAllocateMemoryForBuffer() call. Same as RecordAllocateMemory plus
// the two dedicated-allocation flags (serialized as 0/1).
void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13402 
// Logs a vmaAllocateMemoryForImage() call. Identical layout to
// RecordAllocateMemoryForBuffer, differing only in the function name column.
void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    const VkMemoryRequirements& vkMemReq,
    bool requiresDedicatedAllocation,
    bool prefersDedicatedAllocation,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        vkMemReq.size,
        vkMemReq.alignment,
        vkMemReq.memoryTypeBits,
        requiresDedicatedAllocation ? 1 : 0,
        prefersDedicatedAllocation ? 1 : 0,
        createInfo.flags,
        createInfo.usage,
        createInfo.requiredFlags,
        createInfo.preferredFlags,
        createInfo.memoryTypeBits,
        createInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13431 
13432 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
13433  VmaAllocation allocation)
13434 {
13435  CallParams callParams;
13436  GetBasicParams(callParams);
13437 
13438  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13439  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13440  allocation);
13441  Flush();
13442 }
13443 
13444 void VmaRecorder::RecordResizeAllocation(
13445  uint32_t frameIndex,
13446  VmaAllocation allocation,
13447  VkDeviceSize newSize)
13448 {
13449  CallParams callParams;
13450  GetBasicParams(callParams);
13451 
13452  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13453  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
13454  allocation, newSize);
13455  Flush();
13456 }
13457 
// Logs a vmaSetAllocationUserData() call. When the allocation was created
// with a user-data string, the copy-string flag is re-supplied so
// UserDataString serializes pUserData as text rather than as a pointer.
void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    VmaAllocation allocation,
    const void* pUserData)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(
        allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
        pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        allocation,
        userDataStr.GetString());
    Flush();
}
13474 
13475 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
13476  VmaAllocation allocation)
13477 {
13478  CallParams callParams;
13479  GetBasicParams(callParams);
13480 
13481  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13482  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13483  allocation);
13484  Flush();
13485 }
13486 
13487 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
13488  VmaAllocation allocation)
13489 {
13490  CallParams callParams;
13491  GetBasicParams(callParams);
13492 
13493  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13494  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13495  allocation);
13496  Flush();
13497 }
13498 
13499 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
13500  VmaAllocation allocation)
13501 {
13502  CallParams callParams;
13503  GetBasicParams(callParams);
13504 
13505  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13506  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
13507  allocation);
13508  Flush();
13509 }
13510 
13511 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
13512  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13513 {
13514  CallParams callParams;
13515  GetBasicParams(callParams);
13516 
13517  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13518  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13519  allocation,
13520  offset,
13521  size);
13522  Flush();
13523 }
13524 
13525 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
13526  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
13527 {
13528  CallParams callParams;
13529  GetBasicParams(callParams);
13530 
13531  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13532  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
13533  allocation,
13534  offset,
13535  size);
13536  Flush();
13537 }
13538 
// Logs a vmaCreateBuffer() call: buffer-create and allocation-create
// parameters, resulting handle and user data string. Positional argument
// order defines the file format — keep it.
void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    const VkBufferCreateInfo& bufCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        bufCreateInfo.flags,
        bufCreateInfo.size,
        bufCreateInfo.usage,
        bufCreateInfo.sharingMode,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13564 
// Logs a vmaCreateImage() call: full image-create and allocation-create
// parameters, resulting handle and user data string. Positional argument
// order defines the file format — keep it.
void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    const VkImageCreateInfo& imageCreateInfo,
    const VmaAllocationCreateInfo& allocCreateInfo,
    VmaAllocation allocation)
{
    CallParams callParams;
    GetBasicParams(callParams);

    VmaMutexLock lock(m_FileMutex, m_UseMutex);
    UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
        imageCreateInfo.flags,
        imageCreateInfo.imageType,
        imageCreateInfo.format,
        imageCreateInfo.extent.width,
        imageCreateInfo.extent.height,
        imageCreateInfo.extent.depth,
        imageCreateInfo.mipLevels,
        imageCreateInfo.arrayLayers,
        imageCreateInfo.samples,
        imageCreateInfo.tiling,
        imageCreateInfo.usage,
        imageCreateInfo.sharingMode,
        imageCreateInfo.initialLayout,
        allocCreateInfo.flags,
        allocCreateInfo.usage,
        allocCreateInfo.requiredFlags,
        allocCreateInfo.preferredFlags,
        allocCreateInfo.memoryTypeBits,
        allocCreateInfo.pool,
        allocation,
        userDataStr.GetString());
    Flush();
}
13599 
13600 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
13601  VmaAllocation allocation)
13602 {
13603  CallParams callParams;
13604  GetBasicParams(callParams);
13605 
13606  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13607  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
13608  allocation);
13609  Flush();
13610 }
13611 
13612 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
13613  VmaAllocation allocation)
13614 {
13615  CallParams callParams;
13616  GetBasicParams(callParams);
13617 
13618  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13619  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
13620  allocation);
13621  Flush();
13622 }
13623 
13624 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
13625  VmaAllocation allocation)
13626 {
13627  CallParams callParams;
13628  GetBasicParams(callParams);
13629 
13630  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13631  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
13632  allocation);
13633  Flush();
13634 }
13635 
13636 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
13637  VmaAllocation allocation)
13638 {
13639  CallParams callParams;
13640  GetBasicParams(callParams);
13641 
13642  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13643  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
13644  allocation);
13645  Flush();
13646 }
13647 
13648 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
13649  VmaPool pool)
13650 {
13651  CallParams callParams;
13652  GetBasicParams(callParams);
13653 
13654  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13655  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
13656  pool);
13657  Flush();
13658 }
13659 
13660 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
13661  const VmaDefragmentationInfo2& info,
13663 {
13664  CallParams callParams;
13665  GetBasicParams(callParams);
13666 
13667  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13668  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
13669  info.flags);
13670  PrintPointerList(info.allocationCount, info.pAllocations);
13671  fprintf(m_File, ",");
13672  PrintPointerList(info.poolCount, info.pPools);
13673  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
13674  info.maxCpuBytesToMove,
13676  info.maxGpuBytesToMove,
13678  info.commandBuffer,
13679  ctx);
13680  Flush();
13681 }
13682 
13683 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
13685 {
13686  CallParams callParams;
13687  GetBasicParams(callParams);
13688 
13689  VmaMutexLock lock(m_FileMutex, m_UseMutex);
13690  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
13691  ctx);
13692  Flush();
13693 }
13694 
13695 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
13696 {
13697  if(pUserData != VMA_NULL)
13698  {
13699  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
13700  {
13701  m_Str = (const char*)pUserData;
13702  }
13703  else
13704  {
13705  sprintf_s(m_PtrStr, "%p", pUserData);
13706  m_Str = m_PtrStr;
13707  }
13708  }
13709  else
13710  {
13711  m_Str = "";
13712  }
13713 }
13714 
// Writes the "Config,Begin" .. "Config,End" header section of the recording:
// physical-device identity, the device limits VMA depends on, the full memory
// heap/type layout, enabled extensions, and the values of the VMA debug macros
// this binary was compiled with. Replay tools use this to reproduce the
// allocator's environment. Caller is expected to hold the file lock; this
// function does not lock or Flush() itself.
void VmaRecorder::WriteConfiguration(
    const VkPhysicalDeviceProperties& devProps,
    const VkPhysicalDeviceMemoryProperties& memProps,
    bool dedicatedAllocationExtensionEnabled)
{
    fprintf(m_File, "Config,Begin\n");

    // Device identity.
    fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);

    // Limits that affect allocation placement decisions.
    fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);

    // Memory heap and type layout.
    fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
        fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    }
    fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    {
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
        fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    }

    fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);

    // Compile-time VMA configuration macros.
    fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);

    fprintf(m_File, "Config,End\n");
}
13760 
13761 void VmaRecorder::GetBasicParams(CallParams& outParams)
13762 {
13763  outParams.threadId = GetCurrentThreadId();
13764 
13765  LARGE_INTEGER counter;
13766  QueryPerformanceCounter(&counter);
13767  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
13768 }
13769 
13770 void VmaRecorder::Flush()
13771 {
13772  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
13773  {
13774  fflush(m_File);
13775  }
13776 }
13777 
13778 #endif // #if VMA_RECORDING_ENABLED
13779 
13781 // VmaAllocator_T
13782 
13783 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
13784  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
13785  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
13786  m_hDevice(pCreateInfo->device),
13787  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
13788  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
13789  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
13790  m_PreferredLargeHeapBlockSize(0),
13791  m_PhysicalDevice(pCreateInfo->physicalDevice),
13792  m_CurrentFrameIndex(0),
13793  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
13794  m_NextPoolId(0)
13796  ,m_pRecorder(VMA_NULL)
13797 #endif
13798 {
13799  if(VMA_DEBUG_DETECT_CORRUPTION)
13800  {
13801  // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
13802  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
13803  }
13804 
13805  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
13806 
13807 #if !(VMA_DEDICATED_ALLOCATION)
13809  {
13810  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
13811  }
13812 #endif
13813 
13814  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
13815  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
13816  memset(&m_MemProps, 0, sizeof(m_MemProps));
13817 
13818  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
13819  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
13820 
13821  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
13822  {
13823  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
13824  }
13825 
13826  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
13827  {
13828  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
13829  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
13830  }
13831 
13832  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
13833 
13834  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
13835  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
13836 
13837  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
13838  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
13839  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
13840  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
13841 
13842  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
13843  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
13844 
13845  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
13846  {
13847  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
13848  {
13849  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
13850  if(limit != VK_WHOLE_SIZE)
13851  {
13852  m_HeapSizeLimit[heapIndex] = limit;
13853  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
13854  {
13855  m_MemProps.memoryHeaps[heapIndex].size = limit;
13856  }
13857  }
13858  }
13859  }
13860 
13861  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
13862  {
13863  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
13864 
13865  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
13866  this,
13867  memTypeIndex,
13868  preferredBlockSize,
13869  0,
13870  SIZE_MAX,
13871  GetBufferImageGranularity(),
13872  pCreateInfo->frameInUseCount,
13873  false, // isCustomPool
13874  false, // explicitBlockSize
13875  false); // linearAlgorithm
13876  // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
13877  // becase minBlockCount is 0.
13878  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
13879 
13880  }
13881 }
13882 
// Second-phase initialization, separate from the constructor so it can
// return a VkResult. Currently only sets up the optional call recorder.
// Returns VK_SUCCESS when recording is not requested; returns the recorder's
// Init() error, or VK_ERROR_FEATURE_NOT_PRESENT when recording was requested
// but compiled out (VMA_RECORDING_ENABLED != 1).
VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
{
    VkResult res = VK_SUCCESS;

    // Recording is requested via a non-null settings struct with a non-empty file path.
    if(pCreateInfo->pRecordSettings != VMA_NULL &&
        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    {
#if VMA_RECORDING_ENABLED
        m_pRecorder = vma_new(this, VmaRecorder)();
        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
        if(res != VK_SUCCESS)
        {
            // m_pRecorder stays non-null here; the destructor releases it.
            return res;
        }
        // Write the environment header first so the recording is replayable,
        // then log this allocator's creation as the first call entry.
        m_pRecorder->WriteConfiguration(
            m_PhysicalDeviceProperties,
            m_MemProps,
            m_UseKhrDedicatedAllocation);
        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
#else
        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
        return VK_ERROR_FEATURE_NOT_PRESENT;
#endif
    }

    return res;
}
13910 
// Destroys the allocator: finalizes the recorder (if any), then releases the
// per-memory-type dedicated-allocation lists and default block vectors.
// All custom pools must have been destroyed by the user beforehand.
VmaAllocator_T::~VmaAllocator_T()
{
#if VMA_RECORDING_ENABLED
    if(m_pRecorder != VMA_NULL)
    {
        // Log the destruction as the final entry before closing the recorder.
        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
        vma_delete(this, m_pRecorder);
    }
#endif

    VMA_ASSERT(m_Pools.empty());

    // Tear down in reverse memory-type order.
    for(size_t i = GetMemoryTypeCount(); i--; )
    {
        vma_delete(this, m_pDedicatedAllocations[i]);
        vma_delete(this, m_pBlockVectors[i]);
    }
}
13929 
// Populates m_VulkanFunctions. When VMA_STATIC_VULKAN_FUNCTIONS == 1 the
// statically linked entry points are used as defaults; any non-null pointer
// in the optional pVulkanFunctions then overrides the corresponding default.
// Finally asserts that every required function pointer ended up non-null.
void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
{
#if VMA_STATIC_VULKAN_FUNCTIONS == 1
    // Defaults: addresses of the statically linked Vulkan functions.
    m_VulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    m_VulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    m_VulkanFunctions.vkFreeMemory = &vkFreeMemory;
    m_VulkanFunctions.vkMapMemory = &vkMapMemory;
    m_VulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    m_VulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    m_VulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    m_VulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    m_VulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    m_VulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    m_VulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    m_VulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    m_VulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    m_VulkanFunctions.vkCreateImage = &vkCreateImage;
    m_VulkanFunctions.vkDestroyImage = &vkDestroyImage;
    m_VulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION
    // Extension functions have no static linkage; fetch them from the device.
    if(m_UseKhrDedicatedAllocation)
    {
        m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
            (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
        m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
            (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    }
#endif // #if VMA_DEDICATED_ALLOCATION
#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1

// Copies one user-provided function pointer over the default, if set.
#define VMA_COPY_IF_NOT_NULL(funcName) \
    if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;

    if(pVulkanFunctions != VMA_NULL)
    {
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
        VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
        VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
        VMA_COPY_IF_NOT_NULL(vkFreeMemory);
        VMA_COPY_IF_NOT_NULL(vkMapMemory);
        VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
        VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
        VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
        VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
        VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
        VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
        VMA_COPY_IF_NOT_NULL(vkCreateImage);
        VMA_COPY_IF_NOT_NULL(vkDestroyImage);
        VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
#if VMA_DEDICATED_ALLOCATION
        VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
        VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
#endif
    }

#undef VMA_COPY_IF_NOT_NULL

    // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
        VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    }
#endif
}
14018 
14019 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
14020 {
14021  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14022  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
14023  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
14024  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
14025 }
14026 
14027 VkResult VmaAllocator_T::AllocateMemoryOfType(
14028  VkDeviceSize size,
14029  VkDeviceSize alignment,
14030  bool dedicatedAllocation,
14031  VkBuffer dedicatedBuffer,
14032  VkImage dedicatedImage,
14033  const VmaAllocationCreateInfo& createInfo,
14034  uint32_t memTypeIndex,
14035  VmaSuballocationType suballocType,
14036  VmaAllocation* pAllocation)
14037 {
14038  VMA_ASSERT(pAllocation != VMA_NULL);
14039  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, Size=%llu", memTypeIndex, vkMemReq.size);
14040 
14041  VmaAllocationCreateInfo finalCreateInfo = createInfo;
14042 
14043  // If memory type is not HOST_VISIBLE, disable MAPPED.
14044  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14045  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
14046  {
14047  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
14048  }
14049 
14050  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
14051  VMA_ASSERT(blockVector);
14052 
14053  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
14054  bool preferDedicatedMemory =
14055  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
14056  dedicatedAllocation ||
14057  // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
14058  size > preferredBlockSize / 2;
14059 
14060  if(preferDedicatedMemory &&
14061  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
14062  finalCreateInfo.pool == VK_NULL_HANDLE)
14063  {
14065  }
14066 
14067  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
14068  {
14069  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14070  {
14071  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14072  }
14073  else
14074  {
14075  return AllocateDedicatedMemory(
14076  size,
14077  suballocType,
14078  memTypeIndex,
14079  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14080  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14081  finalCreateInfo.pUserData,
14082  dedicatedBuffer,
14083  dedicatedImage,
14084  pAllocation);
14085  }
14086  }
14087  else
14088  {
14089  VkResult res = blockVector->Allocate(
14090  VK_NULL_HANDLE, // hCurrentPool
14091  m_CurrentFrameIndex.load(),
14092  size,
14093  alignment,
14094  finalCreateInfo,
14095  suballocType,
14096  pAllocation);
14097  if(res == VK_SUCCESS)
14098  {
14099  return res;
14100  }
14101 
14102  // 5. Try dedicated memory.
14103  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14104  {
14105  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14106  }
14107  else
14108  {
14109  res = AllocateDedicatedMemory(
14110  size,
14111  suballocType,
14112  memTypeIndex,
14113  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
14114  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
14115  finalCreateInfo.pUserData,
14116  dedicatedBuffer,
14117  dedicatedImage,
14118  pAllocation);
14119  if(res == VK_SUCCESS)
14120  {
14121  // Succeeded: AllocateDedicatedMemory function already filld pMemory, nothing more to do here.
14122  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
14123  return VK_SUCCESS;
14124  }
14125  else
14126  {
14127  // Everything failed: Return error code.
14128  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
14129  return res;
14130  }
14131  }
14132  }
14133 }
14134 
// Allocates a whole VkDeviceMemory block dedicated to one allocation:
// builds VkMemoryAllocateInfo (chaining VkMemoryDedicatedAllocateInfoKHR when
// the extension is in use and a buffer/image is supplied), allocates, maps if
// requested (freeing the memory again on map failure), then creates the
// VmaAllocation object and registers it in m_pDedicatedAllocations.
// Returns VK_SUCCESS or the failing Vulkan call's error code.
VkResult VmaAllocator_T::AllocateDedicatedMemory(
    VkDeviceSize size,
    VmaSuballocationType suballocType,
    uint32_t memTypeIndex,
    bool map,
    bool isUserDataString,
    void* pUserData,
    VkBuffer dedicatedBuffer,
    VkImage dedicatedImage,
    VmaAllocation* pAllocation)
{
    VMA_ASSERT(pAllocation);

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    allocInfo.memoryTypeIndex = memTypeIndex;
    allocInfo.allocationSize = size;

#if VMA_DEDICATED_ALLOCATION
    // Chain the dedicated-allocation struct so the driver can optimize.
    // At most one of dedicatedBuffer/dedicatedImage may be set.
    VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    if(m_UseKhrDedicatedAllocation)
    {
        if(dedicatedBuffer != VK_NULL_HANDLE)
        {
            VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
            dedicatedAllocInfo.buffer = dedicatedBuffer;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
        else if(dedicatedImage != VK_NULL_HANDLE)
        {
            dedicatedAllocInfo.image = dedicatedImage;
            allocInfo.pNext = &dedicatedAllocInfo;
        }
    }
#endif // #if VMA_DEDICATED_ALLOCATION

    // Allocate VkDeviceMemory.
    VkDeviceMemory hMemory = VK_NULL_HANDLE;
    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    if(res < 0)
    {
        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
        return res;
    }

    // Persistently map the whole block if requested; on failure, release the
    // just-allocated memory so nothing leaks.
    void* pMappedData = VMA_NULL;
    if(map)
    {
        res = (*m_VulkanFunctions.vkMapMemory)(
            m_hDevice,
            hMemory,
            0,
            VK_WHOLE_SIZE,
            0,
            &pMappedData);
        if(res < 0)
        {
            VMA_DEBUG_LOG("    vkMapMemory FAILED");
            FreeVulkanMemory(memTypeIndex, size, hMemory);
            return res;
        }
    }

    *pAllocation = vma_new(this, VmaAllocation_T)(m_CurrentFrameIndex.load(), isUserDataString);
    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    (*pAllocation)->SetUserData(this, pUserData);
    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    {
        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    }

    // Register it in m_pDedicatedAllocations.
    {
        VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
        AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
        VMA_ASSERT(pDedicatedAllocations);
        VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, *pAllocation);
    }

    VMA_DEBUG_LOG("    Allocated DedicatedMemory MemoryTypeIndex=#%u", memTypeIndex);

    return VK_SUCCESS;
}
14217 
// Queries memory requirements for a buffer. When VK_KHR_dedicated_allocation
// is enabled, uses vkGetBufferMemoryRequirements2KHR with a chained
// VkMemoryDedicatedRequirementsKHR so the driver can report whether a
// dedicated allocation is required or preferred; otherwise falls back to the
// core query and reports false for both dedicated-allocation flags.
// (Note: the `else` intentionally straddles the #endif.)
void VmaAllocator_T::GetBufferMemoryRequirements(
    VkBuffer hBuffer,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.buffer = hBuffer;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation  = false;
    }
}
14249 
// Queries memory requirements for an image. Mirrors
// GetBufferMemoryRequirements: with VK_KHR_dedicated_allocation enabled,
// chains VkMemoryDedicatedRequirementsKHR through
// vkGetImageMemoryRequirements2KHR to learn the driver's dedicated-allocation
// preference; otherwise uses the core query and reports false for both flags.
// (Note: the `else` intentionally straddles the #endif.)
void VmaAllocator_T::GetImageMemoryRequirements(
    VkImage hImage,
    VkMemoryRequirements& memReq,
    bool& requiresDedicatedAllocation,
    bool& prefersDedicatedAllocation) const
{
#if VMA_DEDICATED_ALLOCATION
    if(m_UseKhrDedicatedAllocation)
    {
        VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
        memReqInfo.image = hImage;

        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };

        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
        memReq2.pNext = &memDedicatedReq;

        (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);

        memReq = memReq2.memoryRequirements;
        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
        prefersDedicatedAllocation  = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    }
    else
#endif // #if VMA_DEDICATED_ALLOCATION
    {
        (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
        requiresDedicatedAllocation = false;
        prefersDedicatedAllocation = false;
    }
}
14281 
14282 VkResult VmaAllocator_T::AllocateMemory(
14283  const VkMemoryRequirements& vkMemReq,
14284  bool requiresDedicatedAllocation,
14285  bool prefersDedicatedAllocation,
14286  VkBuffer dedicatedBuffer,
14287  VkImage dedicatedImage,
14288  const VmaAllocationCreateInfo& createInfo,
14289  VmaSuballocationType suballocType,
14290  VmaAllocation* pAllocation)
14291 {
14292  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
14293 
14294  if(vkMemReq.size == 0)
14295  {
14296  return VK_ERROR_VALIDATION_FAILED_EXT;
14297  }
14298  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
14299  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14300  {
14301  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
14302  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14303  }
14304  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14306  {
14307  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
14308  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14309  }
14310  if(requiresDedicatedAllocation)
14311  {
14312  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
14313  {
14314  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
14315  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14316  }
14317  if(createInfo.pool != VK_NULL_HANDLE)
14318  {
14319  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
14320  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14321  }
14322  }
14323  if((createInfo.pool != VK_NULL_HANDLE) &&
14324  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
14325  {
14326  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
14327  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14328  }
14329 
14330  if(createInfo.pool != VK_NULL_HANDLE)
14331  {
14332  const VkDeviceSize alignmentForPool = VMA_MAX(
14333  vkMemReq.alignment,
14334  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
14335  return createInfo.pool->m_BlockVector.Allocate(
14336  createInfo.pool,
14337  m_CurrentFrameIndex.load(),
14338  vkMemReq.size,
14339  alignmentForPool,
14340  createInfo,
14341  suballocType,
14342  pAllocation);
14343  }
14344  else
14345  {
14346  // Bit mask of memory Vulkan types acceptable for this allocation.
14347  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
14348  uint32_t memTypeIndex = UINT32_MAX;
14349  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14350  if(res == VK_SUCCESS)
14351  {
14352  VkDeviceSize alignmentForMemType = VMA_MAX(
14353  vkMemReq.alignment,
14354  GetMemoryTypeMinAlignment(memTypeIndex));
14355 
14356  res = AllocateMemoryOfType(
14357  vkMemReq.size,
14358  alignmentForMemType,
14359  requiresDedicatedAllocation || prefersDedicatedAllocation,
14360  dedicatedBuffer,
14361  dedicatedImage,
14362  createInfo,
14363  memTypeIndex,
14364  suballocType,
14365  pAllocation);
14366  // Succeeded on first try.
14367  if(res == VK_SUCCESS)
14368  {
14369  return res;
14370  }
14371  // Allocation from this memory type failed. Try other compatible memory types.
14372  else
14373  {
14374  for(;;)
14375  {
14376  // Remove old memTypeIndex from list of possibilities.
14377  memoryTypeBits &= ~(1u << memTypeIndex);
14378  // Find alternative memTypeIndex.
14379  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
14380  if(res == VK_SUCCESS)
14381  {
14382  alignmentForMemType = VMA_MAX(
14383  vkMemReq.alignment,
14384  GetMemoryTypeMinAlignment(memTypeIndex));
14385 
14386  res = AllocateMemoryOfType(
14387  vkMemReq.size,
14388  alignmentForMemType,
14389  requiresDedicatedAllocation || prefersDedicatedAllocation,
14390  dedicatedBuffer,
14391  dedicatedImage,
14392  createInfo,
14393  memTypeIndex,
14394  suballocType,
14395  pAllocation);
14396  // Allocation from this alternative memory type succeeded.
14397  if(res == VK_SUCCESS)
14398  {
14399  return res;
14400  }
14401  // else: Allocation from this memory type failed. Try next one - next loop iteration.
14402  }
14403  // No other matching memory type index could be found.
14404  else
14405  {
14406  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
14407  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
14408  }
14409  }
14410  }
14411  }
14412  // Can't find any single memory type maching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
14413  else
14414  return res;
14415  }
14416 }
14417 
14418 void VmaAllocator_T::FreeMemory(const VmaAllocation allocation)
14419 {
14420  VMA_ASSERT(allocation);
14421 
14422  if(TouchAllocation(allocation))
14423  {
14424  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
14425  {
14426  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
14427  }
14428 
14429  switch(allocation->GetType())
14430  {
14431  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14432  {
14433  VmaBlockVector* pBlockVector = VMA_NULL;
14434  VmaPool hPool = allocation->GetPool();
14435  if(hPool != VK_NULL_HANDLE)
14436  {
14437  pBlockVector = &hPool->m_BlockVector;
14438  }
14439  else
14440  {
14441  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
14442  pBlockVector = m_pBlockVectors[memTypeIndex];
14443  }
14444  pBlockVector->Free(allocation);
14445  }
14446  break;
14447  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14448  FreeDedicatedMemory(allocation);
14449  break;
14450  default:
14451  VMA_ASSERT(0);
14452  }
14453  }
14454 
14455  allocation->SetUserData(this, VMA_NULL);
14456  vma_delete(this, allocation);
14457 }
14458 
14459 VkResult VmaAllocator_T::ResizeAllocation(
14460  const VmaAllocation alloc,
14461  VkDeviceSize newSize)
14462 {
14463  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
14464  {
14465  return VK_ERROR_VALIDATION_FAILED_EXT;
14466  }
14467  if(newSize == alloc->GetSize())
14468  {
14469  return VK_SUCCESS;
14470  }
14471 
14472  switch(alloc->GetType())
14473  {
14474  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14475  return VK_ERROR_FEATURE_NOT_PRESENT;
14476  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14477  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
14478  {
14479  alloc->ChangeSize(newSize);
14480  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
14481  return VK_SUCCESS;
14482  }
14483  else
14484  {
14485  return VK_ERROR_OUT_OF_POOL_MEMORY;
14486  }
14487  default:
14488  VMA_ASSERT(0);
14489  return VK_ERROR_VALIDATION_FAILED_EXT;
14490  }
14491 }
14492 
14493 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
14494 {
14495  // Initialize.
14496  InitStatInfo(pStats->total);
14497  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
14498  InitStatInfo(pStats->memoryType[i]);
14499  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
14500  InitStatInfo(pStats->memoryHeap[i]);
14501 
14502  // Process default pools.
14503  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14504  {
14505  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14506  VMA_ASSERT(pBlockVector);
14507  pBlockVector->AddStats(pStats);
14508  }
14509 
14510  // Process custom pools.
14511  {
14512  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14513  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14514  {
14515  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
14516  }
14517  }
14518 
14519  // Process dedicated allocations.
14520  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14521  {
14522  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
14523  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
14524  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
14525  VMA_ASSERT(pDedicatedAllocVector);
14526  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
14527  {
14528  VmaStatInfo allocationStatInfo;
14529  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
14530  VmaAddStatInfo(pStats->total, allocationStatInfo);
14531  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14532  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14533  }
14534  }
14535 
14536  // Postprocess.
14537  VmaPostprocessCalcStatInfo(pStats->total);
14538  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
14539  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
14540  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
14541  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
14542 }
14543 
// AMD's PCI vendor ID, 4098 == 0x1002, as reported in VkPhysicalDeviceProperties::vendorID.
14544 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
14545 
14546 VkResult VmaAllocator_T::DefragmentationBegin(
14547  const VmaDefragmentationInfo2& info,
14548  VmaDefragmentationStats* pStats,
14549  VmaDefragmentationContext* pContext)
14550 {
14551  if(info.pAllocationsChanged != VMA_NULL)
14552  {
14553  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
14554  }
14555 
14556  *pContext = vma_new(this, VmaDefragmentationContext_T)(
14557  this, m_CurrentFrameIndex.load(), info.flags, pStats);
14558 
14559  (*pContext)->AddPools(info.poolCount, info.pPools);
14560  (*pContext)->AddAllocations(
14562 
14563  VkResult res = (*pContext)->Defragment(
14566  info.commandBuffer, pStats);
14567 
14568  if(res != VK_NOT_READY)
14569  {
14570  vma_delete(this, *pContext);
14571  *pContext = VMA_NULL;
14572  }
14573 
14574  return res;
14575 }
14576 
// Completes a defragmentation operation that returned VK_NOT_READY from
// DefragmentationBegin(): destroys the context object and reports success.
14577 VkResult VmaAllocator_T::DefragmentationEnd(
14578  VmaDefragmentationContext context)
14579 {
14580  vma_delete(this, context);
14581  return VK_SUCCESS;
14582 }
14583 
// Fills *pAllocationInfo with the allocation's current parameters.
// For lost-capable allocations this also "touches" the allocation: it bumps
// the last-use frame index to the current frame via a compare-exchange loop,
// so concurrent callers and MakeLost logic stay consistent.
14584 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
14585 {
14586  if(hAllocation->CanBecomeLost())
14587  {
14588  /*
14589  Warning: This is a carefully designed algorithm.
14590  Do not modify unless you really know what you're doing :)
14591  */
14592  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14593  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14594  for(;;)
14595  {
// Allocation was lost: report empty/placeholder data (size and user data are
// still meaningful so the caller can recreate it).
14596  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14597  {
14598  pAllocationInfo->memoryType = UINT32_MAX;
14599  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
14600  pAllocationInfo->offset = 0;
14601  pAllocationInfo->size = hAllocation->GetSize();
14602  pAllocationInfo->pMappedData = VMA_NULL;
14603  pAllocationInfo->pUserData = hAllocation->GetUserData();
14604  return;
14605  }
// Already touched this frame: report live parameters. pMappedData is left
// null here because a lost-capable allocation cannot be persistently mapped.
14606  else if(localLastUseFrameIndex == localCurrFrameIndex)
14607  {
14608  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14609  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14610  pAllocationInfo->offset = hAllocation->GetOffset();
14611  pAllocationInfo->size = hAllocation->GetSize();
14612  pAllocationInfo->pMappedData = VMA_NULL;
14613  pAllocationInfo->pUserData = hAllocation->GetUserData();
14614  return;
14615  }
14616  else // Last use time earlier than current time.
14617  {
// Try to advance last-use to the current frame; on CAS failure the loop
// re-reads the (possibly now LOST) value and retries.
14618  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14619  {
14620  localLastUseFrameIndex = localCurrFrameIndex;
14621  }
14622  }
14623  }
14624  }
14625  else
14626  {
// Non-lost-capable allocations: the frame-index update exists only so that
// statistics/JSON dumps show a sane value, hence it is compiled in only with
// VMA_STATS_STRING_ENABLED.
14627 #if VMA_STATS_STRING_ENABLED
14628  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14629  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14630  for(;;)
14631  {
14632  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14633  if(localLastUseFrameIndex == localCurrFrameIndex)
14634  {
14635  break;
14636  }
14637  else // Last use time earlier than current time.
14638  {
14639  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14640  {
14641  localLastUseFrameIndex = localCurrFrameIndex;
14642  }
14643  }
14644  }
14645 #endif
14646 
14647  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
14648  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
14649  pAllocationInfo->offset = hAllocation->GetOffset();
14650  pAllocationInfo->size = hAllocation->GetSize();
14651  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
14652  pAllocationInfo->pUserData = hAllocation->GetUserData();
14653  }
14654 }
14655 
// Marks the allocation as used in the current frame, returning false if it is
// lost. Same compare-exchange scheme as GetAllocationInfo, without filling any
// output structure.
14656 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
14657 {
14658  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
14659  if(hAllocation->CanBecomeLost())
14660  {
14661  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14662  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14663  for(;;)
14664  {
// Lost: cannot be touched any more.
14665  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
14666  {
14667  return false;
14668  }
14669  else if(localLastUseFrameIndex == localCurrFrameIndex)
14670  {
14671  return true;
14672  }
14673  else // Last use time earlier than current time.
14674  {
// On CAS failure the loop retries with the freshly observed value.
14675  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14676  {
14677  localLastUseFrameIndex = localCurrFrameIndex;
14678  }
14679  }
14680  }
14681  }
14682  else
14683  {
// Non-lost-capable allocation: update the frame index only for statistics
// builds; the touch always succeeds.
14684 #if VMA_STATS_STRING_ENABLED
14685  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
14686  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
14687  for(;;)
14688  {
14689  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
14690  if(localLastUseFrameIndex == localCurrFrameIndex)
14691  {
14692  break;
14693  }
14694  else // Last use time earlier than current time.
14695  {
14696  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
14697  {
14698  localLastUseFrameIndex = localCurrFrameIndex;
14699  }
14700  }
14701  }
14702 #endif
14703 
14704  return true;
14705  }
14706 }
14707 
14708 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
14709 {
14710  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
14711 
14712  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
14713 
14714  if(newCreateInfo.maxBlockCount == 0)
14715  {
14716  newCreateInfo.maxBlockCount = SIZE_MAX;
14717  }
14718  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
14719  {
14720  return VK_ERROR_INITIALIZATION_FAILED;
14721  }
14722 
14723  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
14724 
14725  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
14726 
14727  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
14728  if(res != VK_SUCCESS)
14729  {
14730  vma_delete(this, *pPool);
14731  *pPool = VMA_NULL;
14732  return res;
14733  }
14734 
14735  // Add to m_Pools.
14736  {
14737  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14738  (*pPool)->SetId(m_NextPoolId++);
14739  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
14740  }
14741 
14742  return VK_SUCCESS;
14743 }
14744 
14745 void VmaAllocator_T::DestroyPool(VmaPool pool)
14746 {
14747  // Remove from m_Pools.
14748  {
14749  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
14750  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
14751  VMA_ASSERT(success && "Pool not found in Allocator.");
14752  }
14753 
14754  vma_delete(this, pool);
14755 }
14756 
// Retrieves statistics for a single custom pool by delegating to its block vector.
14757 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
14758 {
14759  pool->m_BlockVector.GetPoolStats(pPoolStats);
14760 }
14761 
// Atomically publishes the application's current frame index, used by the
// lost-allocation bookkeeping in TouchAllocation/GetAllocationInfo.
14762 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
14763 {
14764  m_CurrentFrameIndex.store(frameIndex);
14765 }
14766 
// Marks eligible allocations in the given pool as lost, based on the current
// frame index; writes the number of allocations lost to *pLostAllocationCount.
14767 void VmaAllocator_T::MakePoolAllocationsLost(
14768  VmaPool hPool,
14769  size_t* pLostAllocationCount)
14770 {
14771  hPool->m_BlockVector.MakePoolAllocationsLost(
14772  m_CurrentFrameIndex.load(),
14773  pLostAllocationCount);
14774 }
14775 
// Validates corruption-detection margins for all blocks of a single custom pool.
14776 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
14777 {
14778  return hPool->m_BlockVector.CheckCorruption();
14779 }
14780 
14781 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
14782 {
14783  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
14784 
14785  // Process default pools.
14786  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
14787  {
14788  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
14789  {
14790  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
14791  VMA_ASSERT(pBlockVector);
14792  VkResult localRes = pBlockVector->CheckCorruption();
14793  switch(localRes)
14794  {
14795  case VK_ERROR_FEATURE_NOT_PRESENT:
14796  break;
14797  case VK_SUCCESS:
14798  finalRes = VK_SUCCESS;
14799  break;
14800  default:
14801  return localRes;
14802  }
14803  }
14804  }
14805 
14806  // Process custom pools.
14807  {
14808  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
14809  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
14810  {
14811  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
14812  {
14813  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
14814  switch(localRes)
14815  {
14816  case VK_ERROR_FEATURE_NOT_PRESENT:
14817  break;
14818  case VK_SUCCESS:
14819  finalRes = VK_SUCCESS;
14820  break;
14821  default:
14822  return localRes;
14823  }
14824  }
14825  }
14826  }
14827 
14828  return finalRes;
14829 }
14830 
// Creates a dummy allocation object that is permanently in the "lost" state,
// usable as a placeholder wherever a VmaAllocation handle is required.
14831 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
14832 {
14833  *pAllocation = vma_new(this, VmaAllocation_T)(VMA_FRAME_INDEX_LOST, false);
14834  (*pAllocation)->InitLost();
14835 }
14836 
// Allocates raw VkDeviceMemory, honoring the optional per-heap size limit and
// invoking the user's device-memory allocation callback on success.
14837 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
14838 {
14839  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
14840 
14841  VkResult res;
// VK_WHOLE_SIZE in m_HeapSizeLimit means "no limit configured for this heap".
14842  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
14843  {
// Check-and-decrement of the remaining budget must be atomic with respect to
// other allocating threads, hence the mutex around both steps.
14844  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
14845  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
14846  {
14847  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
14848  if(res == VK_SUCCESS)
14849  {
14850  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
14851  }
14852  }
14853  else
14854  {
// Request exceeds the artificial heap budget: fail without calling Vulkan.
14855  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
14856  }
14857  }
14858  else
14859  {
14860  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
14861  }
14862 
// Notify the user callback only for successful allocations.
14863  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
14864  {
14865  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
14866  }
14867 
14868  return res;
14869 }
14870 
14871 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
14872 {
14873  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
14874  {
14875  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
14876  }
14877 
14878  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
14879 
14880  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
14881  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
14882  {
14883  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
14884  m_HeapSizeLimit[heapIndex] += size;
14885  }
14886 }
14887 
14888 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
14889 {
14890  if(hAllocation->CanBecomeLost())
14891  {
14892  return VK_ERROR_MEMORY_MAP_FAILED;
14893  }
14894 
14895  switch(hAllocation->GetType())
14896  {
14897  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14898  {
14899  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
14900  char *pBytes = VMA_NULL;
14901  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
14902  if(res == VK_SUCCESS)
14903  {
14904  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
14905  hAllocation->BlockAllocMap();
14906  }
14907  return res;
14908  }
14909  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14910  return hAllocation->DedicatedAllocMap(this, ppData);
14911  default:
14912  VMA_ASSERT(0);
14913  return VK_ERROR_MEMORY_MAP_FAILED;
14914  }
14915 }
14916 
14917 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
14918 {
14919  switch(hAllocation->GetType())
14920  {
14921  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14922  {
14923  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
14924  hAllocation->BlockAllocUnmap();
14925  pBlock->Unmap(this, 1);
14926  }
14927  break;
14928  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14929  hAllocation->DedicatedAllocUnmap(this);
14930  break;
14931  default:
14932  VMA_ASSERT(0);
14933  }
14934 }
14935 
14936 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
14937 {
14938  VkResult res = VK_SUCCESS;
14939  switch(hAllocation->GetType())
14940  {
14941  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14942  res = GetVulkanFunctions().vkBindBufferMemory(
14943  m_hDevice,
14944  hBuffer,
14945  hAllocation->GetMemory(),
14946  0); //memoryOffset
14947  break;
14948  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14949  {
14950  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
14951  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
14952  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
14953  break;
14954  }
14955  default:
14956  VMA_ASSERT(0);
14957  }
14958  return res;
14959 }
14960 
14961 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
14962 {
14963  VkResult res = VK_SUCCESS;
14964  switch(hAllocation->GetType())
14965  {
14966  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
14967  res = GetVulkanFunctions().vkBindImageMemory(
14968  m_hDevice,
14969  hImage,
14970  hAllocation->GetMemory(),
14971  0); //memoryOffset
14972  break;
14973  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
14974  {
14975  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
14976  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
14977  res = pBlock->BindImageMemory(this, hAllocation, hImage);
14978  break;
14979  }
14980  default:
14981  VMA_ASSERT(0);
14982  }
14983  return res;
14984 }
14985 
// Flushes or invalidates a sub-range of the allocation on non-coherent memory.
// Expands [offset, offset+size) to nonCoherentAtomSize alignment as required
// by the Vulkan spec for VkMappedMemoryRange, then clamps to the allocation
// (dedicated) or enclosing block (block allocation). No-op for coherent memory
// or size == 0.
14986 void VmaAllocator_T::FlushOrInvalidateAllocation(
14987  VmaAllocation hAllocation,
14988  VkDeviceSize offset, VkDeviceSize size,
14989  VMA_CACHE_OPERATION op)
14990 {
14991  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
14992  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
14993  {
14994  const VkDeviceSize allocationSize = hAllocation->GetSize();
14995  VMA_ASSERT(offset <= allocationSize);
14996 
14997  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
14998 
14999  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
15000  memRange.memory = hAllocation->GetMemory();
15001 
15002  switch(hAllocation->GetType())
15003  {
15004  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
// Dedicated: align the start down, the end up, and clamp to allocation size.
15005  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15006  if(size == VK_WHOLE_SIZE)
15007  {
15008  memRange.size = allocationSize - memRange.offset;
15009  }
15010  else
15011  {
15012  VMA_ASSERT(offset + size <= allocationSize);
15013  memRange.size = VMA_MIN(
15014  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
15015  allocationSize - memRange.offset);
15016  }
15017  break;
15018 
15019  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
15020  {
15021  // 1. Still within this allocation.
15022  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
15023  if(size == VK_WHOLE_SIZE)
15024  {
15025  size = allocationSize - offset;
15026  }
15027  else
15028  {
15029  VMA_ASSERT(offset + size <= allocationSize);
15030  }
15031  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
15032 
15033  // 2. Adjust to whole block.
// The allocation's offset inside the block is itself atom-aligned, so adding
// it preserves the alignment of memRange.offset; the size is then clamped so
// the range never runs past the end of the VkDeviceMemory block.
15034  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
15035  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
15036  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
15037  memRange.offset += allocationOffset;
15038  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
15039 
15040  break;
15041  }
15042 
15043  default:
15044  VMA_ASSERT(0);
15045  }
15046 
15047  switch(op)
15048  {
15049  case VMA_CACHE_FLUSH:
15050  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
15051  break;
15052  case VMA_CACHE_INVALIDATE:
15053  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
15054  break;
15055  default:
15056  VMA_ASSERT(0);
15057  }
15058  }
15059  // else: Just ignore this call.
15060 }
15061 
// Releases a dedicated allocation: unregisters it from the per-memory-type
// dedicated-allocation list under its write lock, then frees the underlying
// VkDeviceMemory via FreeVulkanMemory (which also updates heap budgets).
15062 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
15063 {
15064  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
15065 
15066  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
15067  {
15068  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15069  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
15070  VMA_ASSERT(pDedicatedAllocations);
15071  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
15072  VMA_ASSERT(success);
15073  }
15074 
15075  VkDeviceMemory hMemory = allocation->GetMemory();
15076 
15077  /*
15078  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
15079  before vkFreeMemory.
15080 
15081  if(allocation->GetMappedData() != VMA_NULL)
15082  {
15083  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
15084  }
15085  */
15086 
15087  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
15088 
15089  VMA_DEBUG_LOG("    Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
15090 }
15091 
15092 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
15093 {
15094  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
15095  !hAllocation->CanBecomeLost() &&
15096  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15097  {
15098  void* pData = VMA_NULL;
15099  VkResult res = Map(hAllocation, &pData);
15100  if(res == VK_SUCCESS)
15101  {
15102  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
15103  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
15104  Unmap(hAllocation);
15105  }
15106  else
15107  {
15108  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
15109  }
15110  }
15111 }
15112 
15113 #if VMA_STATS_STRING_ENABLED
15114 
// Writes the detailed memory map as JSON: "DedicatedAllocations" grouped by
// memory type, "DefaultPools" per non-empty memory type, then custom "Pools"
// keyed by pool id. Object/array nesting must stay exactly as emitted here to
// keep the output well-formed.
15115 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
15116 {
15117  bool dedicatedAllocationsStarted = false;
15118  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15119  {
15120  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
15121  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
15122  VMA_ASSERT(pDedicatedAllocVector);
15123  if(pDedicatedAllocVector->empty() == false)
15124  {
// Open the "DedicatedAllocations" object lazily, only when the first
// non-empty vector is found, so an empty section is never emitted.
15125  if(dedicatedAllocationsStarted == false)
15126  {
15127  dedicatedAllocationsStarted = true;
15128  json.WriteString("DedicatedAllocations");
15129  json.BeginObject();
15130  }
15131 
15132  json.BeginString("Type ");
15133  json.ContinueString(memTypeIndex);
15134  json.EndString();
15135 
15136  json.BeginArray();
15137 
15138  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
15139  {
15140  json.BeginObject(true);
15141  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
15142  hAlloc->PrintParameters(json);
15143  json.EndObject();
15144  }
15145 
15146  json.EndArray();
15147  }
15148  }
15149  if(dedicatedAllocationsStarted)
15150  {
15151  json.EndObject();
15152  }
15153 
15154  {
// Same lazy-open pattern for the default per-memory-type block vectors.
15155  bool allocationsStarted = false;
15156  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15157  {
15158  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
15159  {
15160  if(allocationsStarted == false)
15161  {
15162  allocationsStarted = true;
15163  json.WriteString("DefaultPools");
15164  json.BeginObject();
15165  }
15166 
15167  json.BeginString("Type ");
15168  json.ContinueString(memTypeIndex);
15169  json.EndString();
15170 
15171  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
15172  }
15173  }
15174  if(allocationsStarted)
15175  {
15176  json.EndObject();
15177  }
15178  }
15179 
15180  // Custom pools
15181  {
15182  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
15183  const size_t poolCount = m_Pools.size();
15184  if(poolCount > 0)
15185  {
15186  json.WriteString("Pools");
15187  json.BeginObject();
15188  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15189  {
// Pools are keyed by their numeric id, written as a JSON string.
15190  json.BeginString();
15191  json.ContinueString(m_Pools[poolIndex]->GetId());
15192  json.EndString();
15193 
15194  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
15195  }
15196  json.EndObject();
15197  }
15198  }
15199 }
15200 
15201 #endif // #if VMA_STATS_STRING_ENABLED
15202 
15204 // Public interface
15205 
// Public entry point: constructs the allocator object with the user's
// allocation callbacks, then runs its (possibly failing) Init step.
// Note: on Init failure the partially constructed allocator is returned as-is
// in *pAllocator together with the error code.
15206 VkResult vmaCreateAllocator(
15207  const VmaAllocatorCreateInfo* pCreateInfo,
15208  VmaAllocator* pAllocator)
15209 {
15210  VMA_ASSERT(pCreateInfo && pAllocator);
15211  VMA_DEBUG_LOG("vmaCreateAllocator");
15212  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
15213  return (*pAllocator)->Init(pCreateInfo);
15214 }
15215 
15216 void vmaDestroyAllocator(
15217  VmaAllocator allocator)
15218 {
15219  if(allocator != VK_NULL_HANDLE)
15220  {
15221  VMA_DEBUG_LOG("vmaDestroyAllocator");
15222  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
15223  vma_delete(&allocationCallbacks, allocator);
15224  }
15225 }
15226 
15228  VmaAllocator allocator,
15229  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
15230 {
15231  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
15232  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
15233 }
15234 
15236  VmaAllocator allocator,
15237  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
15238 {
15239  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
15240  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
15241 }
15242 
15244  VmaAllocator allocator,
15245  uint32_t memoryTypeIndex,
15246  VkMemoryPropertyFlags* pFlags)
15247 {
15248  VMA_ASSERT(allocator && pFlags);
15249  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
15250  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
15251 }
15252 
15254  VmaAllocator allocator,
15255  uint32_t frameIndex)
15256 {
15257  VMA_ASSERT(allocator);
15258  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
15259 
15260  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15261 
15262  allocator->SetCurrentFrameIndex(frameIndex);
15263 }
15264 
// Public entry point: computes aggregated statistics across all pools and
// dedicated allocations into *pStats.
15265 void vmaCalculateStats(
15266  VmaAllocator allocator,
15267  VmaStats* pStats)
15268 {
15269  VMA_ASSERT(allocator && pStats);
15270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15271  allocator->CalculateStats(pStats);
15272 }
15273 
15274 #if VMA_STATS_STRING_ENABLED
15275 
// Public entry point: builds a JSON statistics report into a newly allocated,
// NUL-terminated string (*ppStatsString). The caller releases it with
// vmaFreeStatsString(). With detailedMap == VK_TRUE the full memory map is
// appended via PrintDetailedMap().
15276 void vmaBuildStatsString(
15277  VmaAllocator allocator,
15278  char** ppStatsString,
15279  VkBool32 detailedMap)
15280 {
15281  VMA_ASSERT(allocator && ppStatsString);
15282  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15283 
15284  VmaStringBuilder sb(allocator);
15285  {
// The JSON writer lives in this scope so it is destroyed (and its output
// finalized into `sb`) before the string is copied out below.
15286  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
15287  json.BeginObject();
15288 
15289  VmaStats stats;
15290  allocator->CalculateStats(&stats);
15291 
15292  json.WriteString("Total");
15293  VmaPrintStatInfo(json, stats.total);
15294 
// One "Heap N" object per memory heap, each containing its size, flags,
// optional stats, and the memory types that live in it.
15295  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
15296  {
15297  json.BeginString("Heap ");
15298  json.ContinueString(heapIndex);
15299  json.EndString();
15300  json.BeginObject();
15301 
15302  json.WriteString("Size");
15303  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
15304 
15305  json.WriteString("Flags");
15306  json.BeginArray(true);
15307  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
15308  {
15309  json.WriteString("DEVICE_LOCAL");
15310  }
15311  json.EndArray();
15312 
// Emit heap stats only when the heap actually holds blocks.
15313  if(stats.memoryHeap[heapIndex].blockCount > 0)
15314  {
15315  json.WriteString("Stats");
15316  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
15317  }
15318 
15319  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
15320  {
// Nest each memory type under the heap it belongs to.
15321  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
15322  {
15323  json.BeginString("Type ");
15324  json.ContinueString(typeIndex);
15325  json.EndString();
15326 
15327  json.BeginObject();
15328 
15329  json.WriteString("Flags");
15330  json.BeginArray(true);
15331  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
15332  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
15333  {
15334  json.WriteString("DEVICE_LOCAL");
15335  }
15336  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
15337  {
15338  json.WriteString("HOST_VISIBLE");
15339  }
15340  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
15341  {
15342  json.WriteString("HOST_COHERENT");
15343  }
15344  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
15345  {
15346  json.WriteString("HOST_CACHED");
15347  }
15348  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
15349  {
15350  json.WriteString("LAZILY_ALLOCATED");
15351  }
15352  json.EndArray();
15353 
15354  if(stats.memoryType[typeIndex].blockCount > 0)
15355  {
15356  json.WriteString("Stats");
15357  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
15358  }
15359 
15360  json.EndObject();
15361  }
15362  }
15363 
15364  json.EndObject();
15365  }
15366  if(detailedMap == VK_TRUE)
15367  {
15368  allocator->PrintDetailedMap(json);
15369  }
15370 
15371  json.EndObject();
15372  }
15373 
// Copy the builder's buffer into a caller-owned, NUL-terminated string.
15374  const size_t len = sb.GetLength();
15375  char* const pChars = vma_new_array(allocator, char, len + 1);
15376  if(len > 0)
15377  {
15378  memcpy(pChars, sb.GetData(), len);
15379  }
15380  pChars[len] = '\0';
15381  *ppStatsString = pChars;
15382 }
15383 
15384 void vmaFreeStatsString(
15385  VmaAllocator allocator,
15386  char* pStatsString)
15387 {
15388  if(pStatsString != VMA_NULL)
15389  {
15390  VMA_ASSERT(allocator);
15391  size_t len = strlen(pStatsString);
15392  vma_delete_array(allocator, pStatsString, len + 1);
15393  }
15394 }
15395 
15396 #endif // #if VMA_STATS_STRING_ENABLED
15397 
15398 /*
15399 This function is not protected by any mutex because it just reads immutable data.
15400 */
15401 VkResult vmaFindMemoryTypeIndex(
15402  VmaAllocator allocator,
15403  uint32_t memoryTypeBits,
15404  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15405  uint32_t* pMemoryTypeIndex)
15406 {
15407  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15408  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15409  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15410 
15411  if(pAllocationCreateInfo->memoryTypeBits != 0)
15412  {
15413  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
15414  }
15415 
15416  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
15417  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
15418 
15419  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
15420  if(mapped)
15421  {
15422  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15423  }
15424 
15425  // Convert usage to requiredFlags and preferredFlags.
15426  switch(pAllocationCreateInfo->usage)
15427  {
15429  break;
15431  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15432  {
15433  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15434  }
15435  break;
15437  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
15438  break;
15440  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15441  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
15442  {
15443  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
15444  }
15445  break;
15447  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
15448  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
15449  break;
15450  default:
15451  break;
15452  }
15453 
15454  *pMemoryTypeIndex = UINT32_MAX;
15455  uint32_t minCost = UINT32_MAX;
15456  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
15457  memTypeIndex < allocator->GetMemoryTypeCount();
15458  ++memTypeIndex, memTypeBit <<= 1)
15459  {
15460  // This memory type is acceptable according to memoryTypeBits bitmask.
15461  if((memTypeBit & memoryTypeBits) != 0)
15462  {
15463  const VkMemoryPropertyFlags currFlags =
15464  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
15465  // This memory type contains requiredFlags.
15466  if((requiredFlags & ~currFlags) == 0)
15467  {
15468  // Calculate cost as number of bits from preferredFlags not present in this memory type.
15469  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
15470  // Remember memory type with lowest cost.
15471  if(currCost < minCost)
15472  {
15473  *pMemoryTypeIndex = memTypeIndex;
15474  if(currCost == 0)
15475  {
15476  return VK_SUCCESS;
15477  }
15478  minCost = currCost;
15479  }
15480  }
15481  }
15482  }
15483  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
15484 }
15485 
15487  VmaAllocator allocator,
15488  const VkBufferCreateInfo* pBufferCreateInfo,
15489  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15490  uint32_t* pMemoryTypeIndex)
15491 {
15492  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15493  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
15494  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15495  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15496 
15497  const VkDevice hDev = allocator->m_hDevice;
15498  VkBuffer hBuffer = VK_NULL_HANDLE;
15499  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
15500  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
15501  if(res == VK_SUCCESS)
15502  {
15503  VkMemoryRequirements memReq = {};
15504  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
15505  hDev, hBuffer, &memReq);
15506 
15507  res = vmaFindMemoryTypeIndex(
15508  allocator,
15509  memReq.memoryTypeBits,
15510  pAllocationCreateInfo,
15511  pMemoryTypeIndex);
15512 
15513  allocator->GetVulkanFunctions().vkDestroyBuffer(
15514  hDev, hBuffer, allocator->GetAllocationCallbacks());
15515  }
15516  return res;
15517 }
15518 
15520  VmaAllocator allocator,
15521  const VkImageCreateInfo* pImageCreateInfo,
15522  const VmaAllocationCreateInfo* pAllocationCreateInfo,
15523  uint32_t* pMemoryTypeIndex)
15524 {
15525  VMA_ASSERT(allocator != VK_NULL_HANDLE);
15526  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
15527  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
15528  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
15529 
15530  const VkDevice hDev = allocator->m_hDevice;
15531  VkImage hImage = VK_NULL_HANDLE;
15532  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
15533  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
15534  if(res == VK_SUCCESS)
15535  {
15536  VkMemoryRequirements memReq = {};
15537  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
15538  hDev, hImage, &memReq);
15539 
15540  res = vmaFindMemoryTypeIndex(
15541  allocator,
15542  memReq.memoryTypeBits,
15543  pAllocationCreateInfo,
15544  pMemoryTypeIndex);
15545 
15546  allocator->GetVulkanFunctions().vkDestroyImage(
15547  hDev, hImage, allocator->GetAllocationCallbacks());
15548  }
15549  return res;
15550 }
15551 
15552 VkResult vmaCreatePool(
15553  VmaAllocator allocator,
15554  const VmaPoolCreateInfo* pCreateInfo,
15555  VmaPool* pPool)
15556 {
15557  VMA_ASSERT(allocator && pCreateInfo && pPool);
15558 
15559  VMA_DEBUG_LOG("vmaCreatePool");
15560 
15561  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15562 
15563  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
15564 
15565 #if VMA_RECORDING_ENABLED
15566  if(allocator->GetRecorder() != VMA_NULL)
15567  {
15568  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
15569  }
15570 #endif
15571 
15572  return res;
15573 }
15574 
15575 void vmaDestroyPool(
15576  VmaAllocator allocator,
15577  VmaPool pool)
15578 {
15579  VMA_ASSERT(allocator);
15580 
15581  if(pool == VK_NULL_HANDLE)
15582  {
15583  return;
15584  }
15585 
15586  VMA_DEBUG_LOG("vmaDestroyPool");
15587 
15588  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15589 
15590 #if VMA_RECORDING_ENABLED
15591  if(allocator->GetRecorder() != VMA_NULL)
15592  {
15593  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
15594  }
15595 #endif
15596 
15597  allocator->DestroyPool(pool);
15598 }
15599 
15600 void vmaGetPoolStats(
15601  VmaAllocator allocator,
15602  VmaPool pool,
15603  VmaPoolStats* pPoolStats)
15604 {
15605  VMA_ASSERT(allocator && pool && pPoolStats);
15606 
15607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15608 
15609  allocator->GetPoolStats(pool, pPoolStats);
15610 }
15611 
15613  VmaAllocator allocator,
15614  VmaPool pool,
15615  size_t* pLostAllocationCount)
15616 {
15617  VMA_ASSERT(allocator && pool);
15618 
15619  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15620 
15621 #if VMA_RECORDING_ENABLED
15622  if(allocator->GetRecorder() != VMA_NULL)
15623  {
15624  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
15625  }
15626 #endif
15627 
15628  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
15629 }
15630 
15631 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
15632 {
15633  VMA_ASSERT(allocator && pool);
15634 
15635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15636 
15637  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
15638 
15639  return allocator->CheckPoolCorruption(pool);
15640 }
15641 
15642 VkResult vmaAllocateMemory(
15643  VmaAllocator allocator,
15644  const VkMemoryRequirements* pVkMemoryRequirements,
15645  const VmaAllocationCreateInfo* pCreateInfo,
15646  VmaAllocation* pAllocation,
15647  VmaAllocationInfo* pAllocationInfo)
15648 {
15649  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
15650 
15651  VMA_DEBUG_LOG("vmaAllocateMemory");
15652 
15653  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15654 
15655  VkResult result = allocator->AllocateMemory(
15656  *pVkMemoryRequirements,
15657  false, // requiresDedicatedAllocation
15658  false, // prefersDedicatedAllocation
15659  VK_NULL_HANDLE, // dedicatedBuffer
15660  VK_NULL_HANDLE, // dedicatedImage
15661  *pCreateInfo,
15662  VMA_SUBALLOCATION_TYPE_UNKNOWN,
15663  pAllocation);
15664 
15665 #if VMA_RECORDING_ENABLED
15666  if(allocator->GetRecorder() != VMA_NULL)
15667  {
15668  allocator->GetRecorder()->RecordAllocateMemory(
15669  allocator->GetCurrentFrameIndex(),
15670  *pVkMemoryRequirements,
15671  *pCreateInfo,
15672  *pAllocation);
15673  }
15674 #endif
15675 
15676  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
15677  {
15678  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15679  }
15680 
15681  return result;
15682 }
15683 
15685  VmaAllocator allocator,
15686  VkBuffer buffer,
15687  const VmaAllocationCreateInfo* pCreateInfo,
15688  VmaAllocation* pAllocation,
15689  VmaAllocationInfo* pAllocationInfo)
15690 {
15691  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15692 
15693  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
15694 
15695  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15696 
15697  VkMemoryRequirements vkMemReq = {};
15698  bool requiresDedicatedAllocation = false;
15699  bool prefersDedicatedAllocation = false;
15700  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
15701  requiresDedicatedAllocation,
15702  prefersDedicatedAllocation);
15703 
15704  VkResult result = allocator->AllocateMemory(
15705  vkMemReq,
15706  requiresDedicatedAllocation,
15707  prefersDedicatedAllocation,
15708  buffer, // dedicatedBuffer
15709  VK_NULL_HANDLE, // dedicatedImage
15710  *pCreateInfo,
15711  VMA_SUBALLOCATION_TYPE_BUFFER,
15712  pAllocation);
15713 
15714 #if VMA_RECORDING_ENABLED
15715  if(allocator->GetRecorder() != VMA_NULL)
15716  {
15717  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
15718  allocator->GetCurrentFrameIndex(),
15719  vkMemReq,
15720  requiresDedicatedAllocation,
15721  prefersDedicatedAllocation,
15722  *pCreateInfo,
15723  *pAllocation);
15724  }
15725 #endif
15726 
15727  if(pAllocationInfo && result == VK_SUCCESS)
15728  {
15729  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15730  }
15731 
15732  return result;
15733 }
15734 
15735 VkResult vmaAllocateMemoryForImage(
15736  VmaAllocator allocator,
15737  VkImage image,
15738  const VmaAllocationCreateInfo* pCreateInfo,
15739  VmaAllocation* pAllocation,
15740  VmaAllocationInfo* pAllocationInfo)
15741 {
15742  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
15743 
15744  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
15745 
15746  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15747 
15748  VkMemoryRequirements vkMemReq = {};
15749  bool requiresDedicatedAllocation = false;
15750  bool prefersDedicatedAllocation = false;
15751  allocator->GetImageMemoryRequirements(image, vkMemReq,
15752  requiresDedicatedAllocation, prefersDedicatedAllocation);
15753 
15754  VkResult result = allocator->AllocateMemory(
15755  vkMemReq,
15756  requiresDedicatedAllocation,
15757  prefersDedicatedAllocation,
15758  VK_NULL_HANDLE, // dedicatedBuffer
15759  image, // dedicatedImage
15760  *pCreateInfo,
15761  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
15762  pAllocation);
15763 
15764 #if VMA_RECORDING_ENABLED
15765  if(allocator->GetRecorder() != VMA_NULL)
15766  {
15767  allocator->GetRecorder()->RecordAllocateMemoryForImage(
15768  allocator->GetCurrentFrameIndex(),
15769  vkMemReq,
15770  requiresDedicatedAllocation,
15771  prefersDedicatedAllocation,
15772  *pCreateInfo,
15773  *pAllocation);
15774  }
15775 #endif
15776 
15777  if(pAllocationInfo && result == VK_SUCCESS)
15778  {
15779  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
15780  }
15781 
15782  return result;
15783 }
15784 
15785 void vmaFreeMemory(
15786  VmaAllocator allocator,
15787  VmaAllocation allocation)
15788 {
15789  VMA_ASSERT(allocator);
15790 
15791  if(allocation == VK_NULL_HANDLE)
15792  {
15793  return;
15794  }
15795 
15796  VMA_DEBUG_LOG("vmaFreeMemory");
15797 
15798  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15799 
15800 #if VMA_RECORDING_ENABLED
15801  if(allocator->GetRecorder() != VMA_NULL)
15802  {
15803  allocator->GetRecorder()->RecordFreeMemory(
15804  allocator->GetCurrentFrameIndex(),
15805  allocation);
15806  }
15807 #endif
15808 
15809  allocator->FreeMemory(allocation);
15810 }
15811 
15812 VkResult vmaResizeAllocation(
15813  VmaAllocator allocator,
15814  VmaAllocation allocation,
15815  VkDeviceSize newSize)
15816 {
15817  VMA_ASSERT(allocator && allocation);
15818 
15819  VMA_DEBUG_LOG("vmaResizeAllocation");
15820 
15821  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15822 
15823 #if VMA_RECORDING_ENABLED
15824  if(allocator->GetRecorder() != VMA_NULL)
15825  {
15826  allocator->GetRecorder()->RecordResizeAllocation(
15827  allocator->GetCurrentFrameIndex(),
15828  allocation,
15829  newSize);
15830  }
15831 #endif
15832 
15833  return allocator->ResizeAllocation(allocation, newSize);
15834 }
15835 
15837  VmaAllocator allocator,
15838  VmaAllocation allocation,
15839  VmaAllocationInfo* pAllocationInfo)
15840 {
15841  VMA_ASSERT(allocator && allocation && pAllocationInfo);
15842 
15843  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15844 
15845 #if VMA_RECORDING_ENABLED
15846  if(allocator->GetRecorder() != VMA_NULL)
15847  {
15848  allocator->GetRecorder()->RecordGetAllocationInfo(
15849  allocator->GetCurrentFrameIndex(),
15850  allocation);
15851  }
15852 #endif
15853 
15854  allocator->GetAllocationInfo(allocation, pAllocationInfo);
15855 }
15856 
15857 VkBool32 vmaTouchAllocation(
15858  VmaAllocator allocator,
15859  VmaAllocation allocation)
15860 {
15861  VMA_ASSERT(allocator && allocation);
15862 
15863  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15864 
15865 #if VMA_RECORDING_ENABLED
15866  if(allocator->GetRecorder() != VMA_NULL)
15867  {
15868  allocator->GetRecorder()->RecordTouchAllocation(
15869  allocator->GetCurrentFrameIndex(),
15870  allocation);
15871  }
15872 #endif
15873 
15874  return allocator->TouchAllocation(allocation);
15875 }
15876 
15878  VmaAllocator allocator,
15879  VmaAllocation allocation,
15880  void* pUserData)
15881 {
15882  VMA_ASSERT(allocator && allocation);
15883 
15884  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15885 
15886  allocation->SetUserData(allocator, pUserData);
15887 
15888 #if VMA_RECORDING_ENABLED
15889  if(allocator->GetRecorder() != VMA_NULL)
15890  {
15891  allocator->GetRecorder()->RecordSetAllocationUserData(
15892  allocator->GetCurrentFrameIndex(),
15893  allocation,
15894  pUserData);
15895  }
15896 #endif
15897 }
15898 
15900  VmaAllocator allocator,
15901  VmaAllocation* pAllocation)
15902 {
15903  VMA_ASSERT(allocator && pAllocation);
15904 
15905  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
15906 
15907  allocator->CreateLostAllocation(pAllocation);
15908 
15909 #if VMA_RECORDING_ENABLED
15910  if(allocator->GetRecorder() != VMA_NULL)
15911  {
15912  allocator->GetRecorder()->RecordCreateLostAllocation(
15913  allocator->GetCurrentFrameIndex(),
15914  *pAllocation);
15915  }
15916 #endif
15917 }
15918 
15919 VkResult vmaMapMemory(
15920  VmaAllocator allocator,
15921  VmaAllocation allocation,
15922  void** ppData)
15923 {
15924  VMA_ASSERT(allocator && allocation && ppData);
15925 
15926  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15927 
15928  VkResult res = allocator->Map(allocation, ppData);
15929 
15930 #if VMA_RECORDING_ENABLED
15931  if(allocator->GetRecorder() != VMA_NULL)
15932  {
15933  allocator->GetRecorder()->RecordMapMemory(
15934  allocator->GetCurrentFrameIndex(),
15935  allocation);
15936  }
15937 #endif
15938 
15939  return res;
15940 }
15941 
15942 void vmaUnmapMemory(
15943  VmaAllocator allocator,
15944  VmaAllocation allocation)
15945 {
15946  VMA_ASSERT(allocator && allocation);
15947 
15948  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15949 
15950 #if VMA_RECORDING_ENABLED
15951  if(allocator->GetRecorder() != VMA_NULL)
15952  {
15953  allocator->GetRecorder()->RecordUnmapMemory(
15954  allocator->GetCurrentFrameIndex(),
15955  allocation);
15956  }
15957 #endif
15958 
15959  allocator->Unmap(allocation);
15960 }
15961 
15962 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15963 {
15964  VMA_ASSERT(allocator && allocation);
15965 
15966  VMA_DEBUG_LOG("vmaFlushAllocation");
15967 
15968  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15969 
15970  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
15971 
15972 #if VMA_RECORDING_ENABLED
15973  if(allocator->GetRecorder() != VMA_NULL)
15974  {
15975  allocator->GetRecorder()->RecordFlushAllocation(
15976  allocator->GetCurrentFrameIndex(),
15977  allocation, offset, size);
15978  }
15979 #endif
15980 }
15981 
15982 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15983 {
15984  VMA_ASSERT(allocator && allocation);
15985 
15986  VMA_DEBUG_LOG("vmaInvalidateAllocation");
15987 
15988  VMA_DEBUG_GLOBAL_MUTEX_LOCK
15989 
15990  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
15991 
15992 #if VMA_RECORDING_ENABLED
15993  if(allocator->GetRecorder() != VMA_NULL)
15994  {
15995  allocator->GetRecorder()->RecordInvalidateAllocation(
15996  allocator->GetCurrentFrameIndex(),
15997  allocation, offset, size);
15998  }
15999 #endif
16000 }
16001 
16002 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
16003 {
16004  VMA_ASSERT(allocator);
16005 
16006  VMA_DEBUG_LOG("vmaCheckCorruption");
16007 
16008  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16009 
16010  return allocator->CheckCorruption(memoryTypeBits);
16011 }
16012 
16013 VkResult vmaDefragment(
16014  VmaAllocator allocator,
16015  VmaAllocation* pAllocations,
16016  size_t allocationCount,
16017  VkBool32* pAllocationsChanged,
16018  const VmaDefragmentationInfo *pDefragmentationInfo,
16019  VmaDefragmentationStats* pDefragmentationStats)
16020 {
16021  // Deprecated interface, reimplemented using new one.
16022 
16023  VmaDefragmentationInfo2 info2 = {};
16024  info2.allocationCount = (uint32_t)allocationCount;
16025  info2.pAllocations = pAllocations;
16026  info2.pAllocationsChanged = pAllocationsChanged;
16027  if(pDefragmentationInfo != VMA_NULL)
16028  {
16029  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
16030  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
16031  }
16032  else
16033  {
16034  info2.maxCpuAllocationsToMove = UINT32_MAX;
16035  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
16036  }
16037  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
16038 
16040  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
16041  if(res == VK_NOT_READY)
16042  {
16043  res = vmaDefragmentationEnd( allocator, ctx);
16044  }
16045  return res;
16046 }
16047 
16048 VkResult vmaDefragmentationBegin(
16049  VmaAllocator allocator,
16050  const VmaDefragmentationInfo2* pInfo,
16051  VmaDefragmentationStats* pStats,
16052  VmaDefragmentationContext *pContext)
16053 {
16054  VMA_ASSERT(allocator && pInfo && pContext);
16055 
16056  // Degenerate case: Nothing to defragment.
16057  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
16058  {
16059  return VK_SUCCESS;
16060  }
16061 
16062  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
16063  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
16064  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
16065  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
16066 
16067  VMA_DEBUG_LOG("vmaDefragmentationBegin");
16068 
16069  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16070 
16071  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
16072 
16073 #if VMA_RECORDING_ENABLED
16074  if(allocator->GetRecorder() != VMA_NULL)
16075  {
16076  allocator->GetRecorder()->RecordDefragmentationBegin(
16077  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
16078  }
16079 #endif
16080 
16081  return res;
16082 }
16083 
16084 VkResult vmaDefragmentationEnd(
16085  VmaAllocator allocator,
16086  VmaDefragmentationContext context)
16087 {
16088  VMA_ASSERT(allocator);
16089 
16090  VMA_DEBUG_LOG("vmaDefragmentationEnd");
16091 
16092  if(context != VK_NULL_HANDLE)
16093  {
16094  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16095 
16096 #if VMA_RECORDING_ENABLED
16097  if(allocator->GetRecorder() != VMA_NULL)
16098  {
16099  allocator->GetRecorder()->RecordDefragmentationEnd(
16100  allocator->GetCurrentFrameIndex(), context);
16101  }
16102 #endif
16103 
16104  return allocator->DefragmentationEnd(context);
16105  }
16106  else
16107  {
16108  return VK_SUCCESS;
16109  }
16110 }
16111 
16112 VkResult vmaBindBufferMemory(
16113  VmaAllocator allocator,
16114  VmaAllocation allocation,
16115  VkBuffer buffer)
16116 {
16117  VMA_ASSERT(allocator && allocation && buffer);
16118 
16119  VMA_DEBUG_LOG("vmaBindBufferMemory");
16120 
16121  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16122 
16123  return allocator->BindBufferMemory(allocation, buffer);
16124 }
16125 
16126 VkResult vmaBindImageMemory(
16127  VmaAllocator allocator,
16128  VmaAllocation allocation,
16129  VkImage image)
16130 {
16131  VMA_ASSERT(allocator && allocation && image);
16132 
16133  VMA_DEBUG_LOG("vmaBindImageMemory");
16134 
16135  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16136 
16137  return allocator->BindImageMemory(allocation, image);
16138 }
16139 
16140 VkResult vmaCreateBuffer(
16141  VmaAllocator allocator,
16142  const VkBufferCreateInfo* pBufferCreateInfo,
16143  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16144  VkBuffer* pBuffer,
16145  VmaAllocation* pAllocation,
16146  VmaAllocationInfo* pAllocationInfo)
16147 {
16148  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
16149 
16150  if(pBufferCreateInfo->size == 0)
16151  {
16152  return VK_ERROR_VALIDATION_FAILED_EXT;
16153  }
16154 
16155  VMA_DEBUG_LOG("vmaCreateBuffer");
16156 
16157  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16158 
16159  *pBuffer = VK_NULL_HANDLE;
16160  *pAllocation = VK_NULL_HANDLE;
16161 
16162  // 1. Create VkBuffer.
16163  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
16164  allocator->m_hDevice,
16165  pBufferCreateInfo,
16166  allocator->GetAllocationCallbacks(),
16167  pBuffer);
16168  if(res >= 0)
16169  {
16170  // 2. vkGetBufferMemoryRequirements.
16171  VkMemoryRequirements vkMemReq = {};
16172  bool requiresDedicatedAllocation = false;
16173  bool prefersDedicatedAllocation = false;
16174  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
16175  requiresDedicatedAllocation, prefersDedicatedAllocation);
16176 
16177  // Make sure alignment requirements for specific buffer usages reported
16178  // in Physical Device Properties are included in alignment reported by memory requirements.
16179  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
16180  {
16181  VMA_ASSERT(vkMemReq.alignment %
16182  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
16183  }
16184  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
16185  {
16186  VMA_ASSERT(vkMemReq.alignment %
16187  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
16188  }
16189  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
16190  {
16191  VMA_ASSERT(vkMemReq.alignment %
16192  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
16193  }
16194 
16195  // 3. Allocate memory using allocator.
16196  res = allocator->AllocateMemory(
16197  vkMemReq,
16198  requiresDedicatedAllocation,
16199  prefersDedicatedAllocation,
16200  *pBuffer, // dedicatedBuffer
16201  VK_NULL_HANDLE, // dedicatedImage
16202  *pAllocationCreateInfo,
16203  VMA_SUBALLOCATION_TYPE_BUFFER,
16204  pAllocation);
16205 
16206 #if VMA_RECORDING_ENABLED
16207  if(allocator->GetRecorder() != VMA_NULL)
16208  {
16209  allocator->GetRecorder()->RecordCreateBuffer(
16210  allocator->GetCurrentFrameIndex(),
16211  *pBufferCreateInfo,
16212  *pAllocationCreateInfo,
16213  *pAllocation);
16214  }
16215 #endif
16216 
16217  if(res >= 0)
16218  {
16219  // 3. Bind buffer with memory.
16220  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
16221  if(res >= 0)
16222  {
16223  // All steps succeeded.
16224  #if VMA_STATS_STRING_ENABLED
16225  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
16226  #endif
16227  if(pAllocationInfo != VMA_NULL)
16228  {
16229  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16230  }
16231 
16232  return VK_SUCCESS;
16233  }
16234  allocator->FreeMemory(*pAllocation);
16235  *pAllocation = VK_NULL_HANDLE;
16236  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16237  *pBuffer = VK_NULL_HANDLE;
16238  return res;
16239  }
16240  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
16241  *pBuffer = VK_NULL_HANDLE;
16242  return res;
16243  }
16244  return res;
16245 }
16246 
16247 void vmaDestroyBuffer(
16248  VmaAllocator allocator,
16249  VkBuffer buffer,
16250  VmaAllocation allocation)
16251 {
16252  VMA_ASSERT(allocator);
16253 
16254  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16255  {
16256  return;
16257  }
16258 
16259  VMA_DEBUG_LOG("vmaDestroyBuffer");
16260 
16261  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16262 
16263 #if VMA_RECORDING_ENABLED
16264  if(allocator->GetRecorder() != VMA_NULL)
16265  {
16266  allocator->GetRecorder()->RecordDestroyBuffer(
16267  allocator->GetCurrentFrameIndex(),
16268  allocation);
16269  }
16270 #endif
16271 
16272  if(buffer != VK_NULL_HANDLE)
16273  {
16274  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
16275  }
16276 
16277  if(allocation != VK_NULL_HANDLE)
16278  {
16279  allocator->FreeMemory(allocation);
16280  }
16281 }
16282 
16283 VkResult vmaCreateImage(
16284  VmaAllocator allocator,
16285  const VkImageCreateInfo* pImageCreateInfo,
16286  const VmaAllocationCreateInfo* pAllocationCreateInfo,
16287  VkImage* pImage,
16288  VmaAllocation* pAllocation,
16289  VmaAllocationInfo* pAllocationInfo)
16290 {
16291  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
16292 
16293  if(pImageCreateInfo->extent.width == 0 ||
16294  pImageCreateInfo->extent.height == 0 ||
16295  pImageCreateInfo->extent.depth == 0 ||
16296  pImageCreateInfo->mipLevels == 0 ||
16297  pImageCreateInfo->arrayLayers == 0)
16298  {
16299  return VK_ERROR_VALIDATION_FAILED_EXT;
16300  }
16301 
16302  VMA_DEBUG_LOG("vmaCreateImage");
16303 
16304  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16305 
16306  *pImage = VK_NULL_HANDLE;
16307  *pAllocation = VK_NULL_HANDLE;
16308 
16309  // 1. Create VkImage.
16310  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
16311  allocator->m_hDevice,
16312  pImageCreateInfo,
16313  allocator->GetAllocationCallbacks(),
16314  pImage);
16315  if(res >= 0)
16316  {
16317  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
16318  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
16319  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
16320 
16321  // 2. Allocate memory using allocator.
16322  VkMemoryRequirements vkMemReq = {};
16323  bool requiresDedicatedAllocation = false;
16324  bool prefersDedicatedAllocation = false;
16325  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
16326  requiresDedicatedAllocation, prefersDedicatedAllocation);
16327 
16328  res = allocator->AllocateMemory(
16329  vkMemReq,
16330  requiresDedicatedAllocation,
16331  prefersDedicatedAllocation,
16332  VK_NULL_HANDLE, // dedicatedBuffer
16333  *pImage, // dedicatedImage
16334  *pAllocationCreateInfo,
16335  suballocType,
16336  pAllocation);
16337 
16338 #if VMA_RECORDING_ENABLED
16339  if(allocator->GetRecorder() != VMA_NULL)
16340  {
16341  allocator->GetRecorder()->RecordCreateImage(
16342  allocator->GetCurrentFrameIndex(),
16343  *pImageCreateInfo,
16344  *pAllocationCreateInfo,
16345  *pAllocation);
16346  }
16347 #endif
16348 
16349  if(res >= 0)
16350  {
16351  // 3. Bind image with memory.
16352  res = allocator->BindImageMemory(*pAllocation, *pImage);
16353  if(res >= 0)
16354  {
16355  // All steps succeeded.
16356  #if VMA_STATS_STRING_ENABLED
16357  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
16358  #endif
16359  if(pAllocationInfo != VMA_NULL)
16360  {
16361  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
16362  }
16363 
16364  return VK_SUCCESS;
16365  }
16366  allocator->FreeMemory(*pAllocation);
16367  *pAllocation = VK_NULL_HANDLE;
16368  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16369  *pImage = VK_NULL_HANDLE;
16370  return res;
16371  }
16372  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
16373  *pImage = VK_NULL_HANDLE;
16374  return res;
16375  }
16376  return res;
16377 }
16378 
16379 void vmaDestroyImage(
16380  VmaAllocator allocator,
16381  VkImage image,
16382  VmaAllocation allocation)
16383 {
16384  VMA_ASSERT(allocator);
16385 
16386  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
16387  {
16388  return;
16389  }
16390 
16391  VMA_DEBUG_LOG("vmaDestroyImage");
16392 
16393  VMA_DEBUG_GLOBAL_MUTEX_LOCK
16394 
16395 #if VMA_RECORDING_ENABLED
16396  if(allocator->GetRecorder() != VMA_NULL)
16397  {
16398  allocator->GetRecorder()->RecordDestroyImage(
16399  allocator->GetCurrentFrameIndex(),
16400  allocation);
16401  }
16402 #endif
16403 
16404  if(image != VK_NULL_HANDLE)
16405  {
16406  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
16407  }
16408  if(allocation != VK_NULL_HANDLE)
16409  {
16410  allocator->FreeMemory(allocation);
16411  }
16412 }
16413 
16414 #endif // #ifdef VMA_IMPLEMENTATION
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:1727
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2030
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:1785
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side...
Definition: vk_mem_alloc.h:2782
VkResult vmaDefragment(VmaAllocator allocator, VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
Represents single memory allocation.
Definition: vk_mem_alloc.h:1759
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:2355
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:1739
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
struct VmaStats VmaStats
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1987
Definition: vk_mem_alloc.h:2090
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:2735
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:1731
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:2455
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:1782
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:2818
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:2244
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:1626
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:2336
Definition: vk_mem_alloc.h:2067
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:2738
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:1720
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2143
Definition: vk_mem_alloc.h:2014
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:1794
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2272
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null if you leave define VMA_STATIC_VULKAN_FUNCTIONS 1...
Definition: vk_mem_alloc.h:1848
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:1779
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2018
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:1920
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:1736
VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:2772
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:1919
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:2822
void vmaFreeMemory(VmaAllocator allocator, VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:1811
VmaStatInfo total
Definition: vk_mem_alloc.h:1929
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:2830
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2127
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2813
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:1737
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:1662
Represents main object of this library initialized.
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:1788
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:2286
Definition: vk_mem_alloc.h:2280
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:1743
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:1855
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:2465
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:1732
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:1757
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2164
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:2306
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost...
Definition: vk_mem_alloc.h:2342
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1718
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:2289
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2787
VmaMemoryUsage
Definition: vk_mem_alloc.h:1965
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:2747
void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:2808
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects...
Definition: vk_mem_alloc.h:2826
Definition: vk_mem_alloc.h:2004
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2151
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:1735
Represents custom memory pool.
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:1925
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:1668
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:2726
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
Definition: vk_mem_alloc.h:2724
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:2753
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:1689
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:1761
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:1694
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:2828
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2138
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:2352
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:1728
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:1908
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
Definition: vk_mem_alloc.h:2301
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:1681
Definition: vk_mem_alloc.h:2276
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2074
Represents Opaque object that represents started defragmentation process.
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:1921
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:1685
Definition: vk_mem_alloc.h:2101
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:2292
Definition: vk_mem_alloc.h:2013
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:1734
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2133
Definition: vk_mem_alloc.h:2124
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:1911
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:1730
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:2314
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:1797
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2345
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2122
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:2777
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2157
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:1836
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:1927
Set this flag to use a memory that will be persistently mapped and retrieve pointer to it...
Definition: vk_mem_alloc.h:2054
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:1920
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:1741
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:1767
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use...
Definition: vk_mem_alloc.h:2723
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:2801
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:1683
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:1740
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2328
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:1733
Definition: vk_mem_alloc.h:2085
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of a Allocator to be created.
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:1775
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:2479
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB...
Definition: vk_mem_alloc.h:1791
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:1920
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:1917
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:2333
Parameters for defragmentation.
Definition: vk_mem_alloc.h:2732
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions...
Definition: vk_mem_alloc.h:2094
VkDeviceSize offset
Offset into deviceMemory object to the beginning of this allocation, in bytes. (deviceMemory, offset) pair is unique to this allocation.
Definition: vk_mem_alloc.h:2460
Definition: vk_mem_alloc.h:2108
Definition: vk_mem_alloc.h:2120
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places...
Definition: vk_mem_alloc.h:2824
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:1726
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:1915
Definition: vk_mem_alloc.h:1970
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:2282
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:1764
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:1913
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:1738
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:1742
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such b...
Definition: vk_mem_alloc.h:2041
Definition: vk_mem_alloc.h:2115
Definition: vk_mem_alloc.h:1997
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:2474
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:1716
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:1729
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:2261
VkResult vmaResizeAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize newSize)
Tries to resize an allocation in place, if there is enough free memory after it.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:2441
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Definition: vk_mem_alloc.h:2105
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2226
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:1921
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:1751
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:1928
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:2339
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:1921
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side...
Definition: vk_mem_alloc.h:2792
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:2446
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:2756