diff --git a/docs/html/vk__mem__alloc_8h.html b/docs/html/vk__mem__alloc_8h.html index 0a89157..01e8766 100644 --- a/docs/html/vk__mem__alloc_8h.html +++ b/docs/html/vk__mem__alloc_8h.html @@ -2244,8 +2244,9 @@ Functions
  • size can be VK_WHOLE_SIZE. It means all memory from offset to the end of the given allocation.
  • offset and size don't have to be aligned. They are internally rounded down/up to a multiple of nonCoherentAtomSize.
  • If size is 0, this call is ignored.
  • - If the memory type that the allocation belongs to is not HOST_VISIBLE or it is HOST_COHERENT, this call is ignored.
  • + If the memory type that the allocation belongs to is not HOST_VISIBLE or it is HOST_COHERENT, this call is ignored.

    Warning! offset and size are relative to the contents of the given allocation. If you mean the whole allocation, pass 0 and VK_WHOLE_SIZE, respectively. Do not pass the allocation's own offset within the VkDeviceMemory block as offset! A correct-usage sketch follows.
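    A minimal sketch of correct usage (allocator, allocation, srcData, and srcSize are placeholders):

        void* pData = VMA_NULL;
        if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
        {
            memcpy(pData, srcData, srcSize);
            vmaUnmapMemory(allocator, allocation);
            // offset is relative to the start of the allocation, so 0 and
            // VK_WHOLE_SIZE cover it entirely; never pass allocInfo.offset here.
            vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        }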

    @@ -2573,8 +2574,9 @@ Functions
  • size can be VK_WHOLE_SIZE. It means all memory from offset to the end of the given allocation.
  • offset and size don't have to be aligned. They are internally rounded down/up to a multiple of nonCoherentAtomSize.
  • If size is 0, this call is ignored.
  • - If the memory type that the allocation belongs to is not HOST_VISIBLE or it is HOST_COHERENT, this call is ignored.
  • + If the memory type that the allocation belongs to is not HOST_VISIBLE or it is HOST_COHERENT, this call is ignored.

    Warning! offset and size are relative to the contents of the given allocation. If you mean the whole allocation, pass 0 and VK_WHOLE_SIZE, respectively. Do not pass the allocation's own offset within the VkDeviceMemory block as offset!

    diff --git a/docs/html/vk__mem__alloc_8h_source.html b/docs/html/vk__mem__alloc_8h_source.html index e3c14a2..1e2e6ec 100644 --- a/docs/html/vk__mem__alloc_8h_source.html +++ b/docs/html/vk__mem__alloc_8h_source.html @@ -65,11 +65,11 @@ $(function() {
    vk_mem_alloc.h
    1 //
    2 // Copyright (c) 2017-2019 Advanced Micro Devices, Inc. All rights reserved.
    3 //
    4 // Permission is hereby granted, free of charge, to any person obtaining a copy
    5 // of this software and associated documentation files (the "Software"), to deal
    6 // in the Software without restriction, including without limitation the rights
    7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    8 // copies of the Software, and to permit persons to whom the Software is
    9 // furnished to do so, subject to the following conditions:
    10 //
    11 // The above copyright notice and this permission notice shall be included in
    12 // all copies or substantial portions of the Software.
    13 //
    14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    20 // THE SOFTWARE.
    21 //
    22 
    23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
    24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
    25 
    26 #ifdef __cplusplus
    27 extern "C" {
    28 #endif
    29 
    1644 /*
    1645 Define this macro to 0/1 to disable/enable support for recording functionality,
    1646 available through VmaAllocatorCreateInfo::pRecordSettings.
    1647 */
    1648 #ifndef VMA_RECORDING_ENABLED
    1649  #ifdef _WIN32
    1650  #define VMA_RECORDING_ENABLED 1
    1651  #else
    1652  #define VMA_RECORDING_ENABLED 0
    1653  #endif
    1654 #endif
    1655 
    1656 #ifndef NOMINMAX
    1657  #define NOMINMAX // For windows.h
    1658 #endif
    1659 
    1660 #ifndef VULKAN_H_
    1661  #include <vulkan/vulkan.h>
    1662 #endif
    1663 
    1664 #if VMA_RECORDING_ENABLED
    1665  #include <windows.h>
    1666 #endif
    1667 
    1668 #if !defined(VMA_DEDICATED_ALLOCATION)
    1669  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1670  #define VMA_DEDICATED_ALLOCATION 1
    1671  #else
    1672  #define VMA_DEDICATED_ALLOCATION 0
    1673  #endif
    1674 #endif
    1675 
    1685 VK_DEFINE_HANDLE(VmaAllocator)
    1686 
    1687 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1689  VmaAllocator allocator,
    1690  uint32_t memoryType,
    1691  VkDeviceMemory memory,
    1692  VkDeviceSize size);
    1694 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1695  VmaAllocator allocator,
    1696  uint32_t memoryType,
    1697  VkDeviceMemory memory,
    1698  VkDeviceSize size);
    1699 
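    // Usage sketch (illustrative, not part of the header): logging callbacks
    // matching the typedefs above. The VmaDeviceMemoryCallbacks struct and the
    // VmaAllocatorCreateInfo::pDeviceMemoryCallbacks member through which they
    // are registered sit on lines elided from this listing. Requires <cstdio>.
    static void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Allocated %llu bytes from memory type %u\n", (unsigned long long)size, memoryType);
    }
    static void VKAPI_PTR MyFreeCallback(
        VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size)
    {
        printf("Freed %llu bytes of memory type %u\n", (unsigned long long)size, memoryType);
    }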
    1713 
    1743 
    1746 typedef VkFlags VmaAllocatorCreateFlags;
    1747 
    1752 typedef struct VmaVulkanFunctions {
    1753  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1754  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1755  PFN_vkAllocateMemory vkAllocateMemory;
    1756  PFN_vkFreeMemory vkFreeMemory;
    1757  PFN_vkMapMemory vkMapMemory;
    1758  PFN_vkUnmapMemory vkUnmapMemory;
    1759  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1760  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1761  PFN_vkBindBufferMemory vkBindBufferMemory;
    1762  PFN_vkBindImageMemory vkBindImageMemory;
    1763  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1764  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1765  PFN_vkCreateBuffer vkCreateBuffer;
    1766  PFN_vkDestroyBuffer vkDestroyBuffer;
    1767  PFN_vkCreateImage vkCreateImage;
    1768  PFN_vkDestroyImage vkDestroyImage;
    1769  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1770 #if VMA_DEDICATED_ALLOCATION
    1771  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1772  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1773 #endif
    1775 
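    // Usage sketch (illustrative, not part of the header): filling
    // VmaVulkanFunctions from statically linked Vulkan prototypes - the same
    // wiring that VMA_STATIC_VULKAN_FUNCTIONS == 1 performs automatically. The
    // struct is then passed through VmaAllocatorCreateInfo::pVulkanFunctions
    // (a member elided from this listing).
    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = &vkFreeMemory;
    vulkanFunctions.vkMapMemory = &vkMapMemory;
    vulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
    vulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
    vulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
    vulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
    vulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
    vulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
    vulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
    vulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
    vulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
    vulkanFunctions.vkCreateImage = &vkCreateImage;
    vulkanFunctions.vkDestroyImage = &vkDestroyImage;
    vulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;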
    1777 typedef enum VmaRecordFlagBits {
    1784 
    1787 typedef VkFlags VmaRecordFlags;
    1788 
    1790 typedef struct VmaRecordSettings
    1791 {
    1801  const char* pFilePath;
    1803 
    1806 {
    1810 
    1811  VkPhysicalDevice physicalDevice;
    1813 
    1814  VkDevice device;
    1816 
    1819 
    1820  const VkAllocationCallbacks* pAllocationCallbacks;
    1822 
    1862  const VkDeviceSize* pHeapSizeLimit;
    1883 
    1885 VkResult vmaCreateAllocator(
    1886  const VmaAllocatorCreateInfo* pCreateInfo,
    1887  VmaAllocator* pAllocator);
    1888 
    1890 void vmaDestroyAllocator(
    1891  VmaAllocator allocator);
    1892 
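    // Usage sketch (illustrative, not part of the header): minimal allocator
    // lifetime. physicalDevice and device are the application's existing handles.
    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    VmaAllocator allocator = VK_NULL_HANDLE;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... create resources through the allocator ...
    vmaDestroyAllocator(allocator);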
    1898  VmaAllocator allocator,
    1899  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1900 
    1906  VmaAllocator allocator,
    1907  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1908 
    1916  VmaAllocator allocator,
    1917  uint32_t memoryTypeIndex,
    1918  VkMemoryPropertyFlags* pFlags);
    1919 
    1929  VmaAllocator allocator,
    1930  uint32_t frameIndex);
    1931 
    1934 typedef struct VmaStatInfo
    1935 {
    1937  uint32_t blockCount;
    1943  VkDeviceSize usedBytes;
    1945  VkDeviceSize unusedBytes;
    1948 } VmaStatInfo;
    1949 
    1951 typedef struct VmaStats
    1952 {
    1953  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1954  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1956 } VmaStats;
    1957 
    1959 void vmaCalculateStats(
    1960  VmaAllocator allocator,
    1961  VmaStats* pStats);
    1962 
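    // Usage sketch (illustrative, not part of the header): querying aggregate
    // statistics. VmaStats also has a 'total' VmaStatInfo member summing all
    // heaps (its line is elided from this listing).
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // e.g. bytes currently in use from heap 0: stats.memoryHeap[0].usedBytes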
    1963 #ifndef VMA_STATS_STRING_ENABLED
    1964 #define VMA_STATS_STRING_ENABLED 1
    1965 #endif
    1966 
    1967 #if VMA_STATS_STRING_ENABLED
    1968 
    1970 
    1972 void vmaBuildStatsString(
    1973  VmaAllocator allocator,
    1974  char** ppStatsString,
    1975  VkBool32 detailedMap);
    1976 
    1977 void vmaFreeStatsString(
    1978  VmaAllocator allocator,
    1979  char* pStatsString);
    1980 
    1981 #endif // #if VMA_STATS_STRING_ENABLED
    1982 
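    // Usage sketch (illustrative, not part of the header): dumping the JSON
    // report, guarded by the same macro as the functions themselves.
    #if VMA_STATS_STRING_ENABLED
        char* statsString = VMA_NULL;
        vmaBuildStatsString(allocator, &statsString, VK_TRUE); // detailedMap = VK_TRUE
        // ... write statsString to a file for offline inspection ...
        vmaFreeStatsString(allocator, statsString);
    #endif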
    1991 VK_DEFINE_HANDLE(VmaPool)
    1992 
    1993 typedef enum VmaMemoryUsage
    1994 {
    2043 } VmaMemoryUsage;
    2044 
    2054 
    2115 
    2131 
    2141 
    2148 
    2152 
    2154 {
    2167  VkMemoryPropertyFlags requiredFlags;
    2172  VkMemoryPropertyFlags preferredFlags;
    2180  uint32_t memoryTypeBits;
    2193  void* pUserData;
    2195 
    2212 VkResult vmaFindMemoryTypeIndex(
    2213  VmaAllocator allocator,
    2214  uint32_t memoryTypeBits,
    2215  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2216  uint32_t* pMemoryTypeIndex);
    2217 
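    // Usage sketch (illustrative, not part of the header): picking a memory type
    // for given VkMemoryRequirements (memReq). The 'usage' member and the
    // VMA_MEMORY_USAGE_GPU_ONLY value sit on lines elided from this listing.
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);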
    2231  VmaAllocator allocator,
    2232  const VkBufferCreateInfo* pBufferCreateInfo,
    2233  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2234  uint32_t* pMemoryTypeIndex);
    2235 
    2249  VmaAllocator allocator,
    2250  const VkImageCreateInfo* pImageCreateInfo,
    2251  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2252  uint32_t* pMemoryTypeIndex);
    2253 
    2274 
    2291 
    2302 
    2308 
    2311 typedef VkFlags VmaPoolCreateFlags;
    2312 
    2315 typedef struct VmaPoolCreateInfo {
    2330  VkDeviceSize blockSize;
    2359 
    2362 typedef struct VmaPoolStats {
    2365  VkDeviceSize size;
    2368  VkDeviceSize unusedSize;
    2381  VkDeviceSize unusedRangeSizeMax;
    2384  size_t blockCount;
    2385 } VmaPoolStats;
    2386 
    2393 VkResult vmaCreatePool(
    2394  VmaAllocator allocator,
    2395  const VmaPoolCreateInfo* pCreateInfo,
    2396  VmaPool* pPool);
    2397 
    2400 void vmaDestroyPool(
    2401  VmaAllocator allocator,
    2402  VmaPool pool);
    2403 
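    // Usage sketch (illustrative, not part of the header): pool lifetime. The
    // memoryTypeIndex member sits on a line elided from this listing.
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per VkDeviceMemory block
    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from it, inspect it with vmaGetPoolStats ...
    vmaDestroyPool(allocator, pool);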
    2410 void vmaGetPoolStats(
    2411  VmaAllocator allocator,
    2412  VmaPool pool,
    2413  VmaPoolStats* pPoolStats);
    2414 
    2422  VmaAllocator allocator,
    2423  VmaPool pool,
    2424  size_t* pLostAllocationCount);
    2425 
    2440 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2441 
    2466 VK_DEFINE_HANDLE(VmaAllocation)
    2467 
    2468 
    2470 typedef struct VmaAllocationInfo {
    2475  uint32_t memoryType;
    2484  VkDeviceMemory deviceMemory;
    2489  VkDeviceSize offset;
    2494  VkDeviceSize size;
    2508  void* pUserData;
    2510 
    2521 VkResult vmaAllocateMemory(
    2522  VmaAllocator allocator,
    2523  const VkMemoryRequirements* pVkMemoryRequirements,
    2524  const VmaAllocationCreateInfo* pCreateInfo,
    2525  VmaAllocation* pAllocation,
    2526  VmaAllocationInfo* pAllocationInfo);
    2527 
    2547 VkResult vmaAllocateMemoryPages(
    2548  VmaAllocator allocator,
    2549  const VkMemoryRequirements* pVkMemoryRequirements,
    2550  const VmaAllocationCreateInfo* pCreateInfo,
    2551  size_t allocationCount,
    2552  VmaAllocation* pAllocations,
    2553  VmaAllocationInfo* pAllocationInfo);
    2554 
    2562  VmaAllocator allocator,
    2563  VkBuffer buffer,
    2564  const VmaAllocationCreateInfo* pCreateInfo,
    2565  VmaAllocation* pAllocation,
    2566  VmaAllocationInfo* pAllocationInfo);
    2567 
    2569 VkResult vmaAllocateMemoryForImage(
    2570  VmaAllocator allocator,
    2571  VkImage image,
    2572  const VmaAllocationCreateInfo* pCreateInfo,
    2573  VmaAllocation* pAllocation,
    2574  VmaAllocationInfo* pAllocationInfo);
    2575 
    2580 void vmaFreeMemory(
    2581  VmaAllocator allocator,
    2582  VmaAllocation allocation);
    2583 
    2594 void vmaFreeMemoryPages(
    2595  VmaAllocator allocator,
    2596  size_t allocationCount,
    2597  VmaAllocation* pAllocations);
    2598 
    2619 VkResult vmaResizeAllocation(
    2620  VmaAllocator allocator,
    2621  VmaAllocation allocation,
    2622  VkDeviceSize newSize);
    2623 
    2641  VmaAllocator allocator,
    2642  VmaAllocation allocation,
    2643  VmaAllocationInfo* pAllocationInfo);
    2644 
    2659 VkBool32 vmaTouchAllocation(
    2660  VmaAllocator allocator,
    2661  VmaAllocation allocation);
    2662 
    2677  VmaAllocator allocator,
    2678  VmaAllocation allocation,
    2679  void* pUserData);
    2680 
    2692  VmaAllocator allocator,
    2693  VmaAllocation* pAllocation);
    2694 
    2729 VkResult vmaMapMemory(
    2730  VmaAllocator allocator,
    2731  VmaAllocation allocation,
    2732  void** ppData);
    2733 
    2738 void vmaUnmapMemory(
    2739  VmaAllocator allocator,
    2740  VmaAllocation allocation);
    2741 
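    // Usage sketch (illustrative, not part of the header): a map/write/unmap
    // round trip for a HOST_VISIBLE allocation. mySrcData/mySrcSize are placeholders.
    void* pData = VMA_NULL;
    if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
    {
        memcpy(pData, mySrcData, mySrcSize);
        vmaUnmapMemory(allocator, allocation);
        // Needed only if the memory type is not HOST_COHERENT:
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    }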
    2754 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2755 
    2768 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2769 
    2786 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2787 
    2794 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2795 
    2796 typedef enum VmaDefragmentationFlagBits {
    2800 typedef VkFlags VmaDefragmentationFlags;
    2801 
    2806 typedef struct VmaDefragmentationInfo2 {
    2830  uint32_t poolCount;
    2851  VkDeviceSize maxCpuBytesToMove;
    2861  VkDeviceSize maxGpuBytesToMove;
    2875  VkCommandBuffer commandBuffer;
    2877 
    2882 typedef struct VmaDefragmentationInfo {
    2887  VkDeviceSize maxBytesToMove;
    2894 
    2896 typedef struct VmaDefragmentationStats {
    2898  VkDeviceSize bytesMoved;
    2900  VkDeviceSize bytesFreed;
    2906 
    2933 VkResult vmaDefragmentationBegin(
    2934  VmaAllocator allocator,
    2935  const VmaDefragmentationInfo2* pInfo,
    2936  VmaDefragmentationStats* pStats,
    2937  VmaDefragmentationContext *pContext);
    2938 
    2944 VkResult vmaDefragmentationEnd(
    2945  VmaAllocator allocator,
    2946  VmaDefragmentationContext context);
    2947 
    2988 VkResult vmaDefragment(
    2989  VmaAllocator allocator,
    2990  VmaAllocation* pAllocations,
    2991  size_t allocationCount,
    2992  VkBool32* pAllocationsChanged,
    2993  const VmaDefragmentationInfo *pDefragmentationInfo,
    2994  VmaDefragmentationStats* pDefragmentationStats);
    2995 
    3008 VkResult vmaBindBufferMemory(
    3009  VmaAllocator allocator,
    3010  VmaAllocation allocation,
    3011  VkBuffer buffer);
    3012 
    3025 VkResult vmaBindImageMemory(
    3026  VmaAllocator allocator,
    3027  VmaAllocation allocation,
    3028  VkImage image);
    3029 
    3056 VkResult vmaCreateBuffer(
    3057  VmaAllocator allocator,
    3058  const VkBufferCreateInfo* pBufferCreateInfo,
    3059  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3060  VkBuffer* pBuffer,
    3061  VmaAllocation* pAllocation,
    3062  VmaAllocationInfo* pAllocationInfo);
    3063 
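    // Usage sketch (illustrative, not part of the header): the typical
    // create/destroy pair. VmaAllocationCreateInfo::usage and
    // VMA_MEMORY_USAGE_GPU_ONLY sit on lines elided from this listing.
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VkBuffer buffer = VK_NULL_HANDLE;
    VmaAllocation allocation = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(
        allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, VMA_NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation); // frees buffer and its memory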
    3075 void vmaDestroyBuffer(
    3076  VmaAllocator allocator,
    3077  VkBuffer buffer,
    3078  VmaAllocation allocation);
    3079 
    3081 VkResult vmaCreateImage(
    3082  VmaAllocator allocator,
    3083  const VkImageCreateInfo* pImageCreateInfo,
    3084  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3085  VkImage* pImage,
    3086  VmaAllocation* pAllocation,
    3087  VmaAllocationInfo* pAllocationInfo);
    3088 
    3100 void vmaDestroyImage(
    3101  VmaAllocator allocator,
    3102  VkImage image,
    3103  VmaAllocation allocation);
    3104 
    3105 #ifdef __cplusplus
    3106 }
    3107 #endif
    3108 
    3109 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3110 
    3111 // For Visual Studio IntelliSense.
    3112 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3113 #define VMA_IMPLEMENTATION
    3114 #endif
    3115 
    3116 #ifdef VMA_IMPLEMENTATION
    3117 #undef VMA_IMPLEMENTATION
    3118 
    3119 #include <cstdint>
    3120 #include <cstdlib>
    3121 #include <cstring>
    3122 
    3123 /*******************************************************************************
    3124 CONFIGURATION SECTION
    3125 
    3126 Define some of these macros before each #include of this header, or change them
    3127 here, if you need behavior other than the default for your environment.
    3128 */
    3129 
    3130 /*
    3131 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3132 internally, like:
    3133 
    3134  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3135 
    3136 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    3137 VmaAllocatorCreateInfo::pVulkanFunctions.
    3138 */
    3139 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3140 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3141 #endif
    3142 
    3143 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3144 //#define VMA_USE_STL_CONTAINERS 1
    3145 
    3146 /* Set this macro to 1 to make the library include and use STL containers:
    3147 std::pair, std::vector, std::list, std::unordered_map.
    3148 
    3149 Set it to 0 or leave it undefined to make the library use its own implementation of
    3150 the containers.
    3151 */
    3152 #if VMA_USE_STL_CONTAINERS
    3153  #define VMA_USE_STL_VECTOR 1
    3154  #define VMA_USE_STL_UNORDERED_MAP 1
    3155  #define VMA_USE_STL_LIST 1
    3156 #endif
    3157 
    3158 #ifndef VMA_USE_STL_SHARED_MUTEX
    3159  // Compiler conforms to C++17.
    3160  #if __cplusplus >= 201703L
    3161  #define VMA_USE_STL_SHARED_MUTEX 1
    3162  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    3163  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    3164  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3165  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3166  #define VMA_USE_STL_SHARED_MUTEX 1
    3167  #else
    3168  #define VMA_USE_STL_SHARED_MUTEX 0
    3169  #endif
    3170 #endif
    3171 
    3172 #if VMA_USE_STL_VECTOR
    3173  #include <vector>
    3174 #endif
    3175 
    3176 #if VMA_USE_STL_UNORDERED_MAP
    3177  #include <unordered_map>
    3178 #endif
    3179 
    3180 #if VMA_USE_STL_LIST
    3181  #include <list>
    3182 #endif
    3183 
    3184 /*
    3185 Following headers are used in this CONFIGURATION section only, so feel free to
    3186 remove them if not needed.
    3187 */
    3188 #include <cassert> // for assert
    3189 #include <algorithm> // for min, max
    3190 #include <mutex>
    3191 #include <atomic> // for std::atomic
    3192 
    3193 #ifndef VMA_NULL
    3194  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3195  #define VMA_NULL nullptr
    3196 #endif
    3197 
    3198 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3199 #include <cstdlib>
    3200 void *aligned_alloc(size_t alignment, size_t size)
    3201 {
    3202  // alignment must be >= sizeof(void*)
    3203  if(alignment < sizeof(void*))
    3204  {
    3205  alignment = sizeof(void*);
    3206  }
    3207 
    3208  return memalign(alignment, size);
    3209 }
    3210 #elif defined(__APPLE__) || defined(__ANDROID__)
    3211 #include <cstdlib>
    3212 void *aligned_alloc(size_t alignment, size_t size)
    3213 {
    3214  // alignment must be >= sizeof(void*)
    3215  if(alignment < sizeof(void*))
    3216  {
    3217  alignment = sizeof(void*);
    3218  }
    3219 
    3220  void *pointer;
    3221  if(posix_memalign(&pointer, alignment, size) == 0)
    3222  return pointer;
    3223  return VMA_NULL;
    3224 }
    3225 #endif
    3226 
    3227 // If your compiler is not compatible with C++11 and the definition of the
    3228 // aligned_alloc() function is missing, uncommenting the following line may help:
    3229 
    3230 //#include <malloc.h>
    3231 
    3232 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3233 #ifndef VMA_ASSERT
    3234  #ifdef _DEBUG
    3235  #define VMA_ASSERT(expr) assert(expr)
    3236  #else
    3237  #define VMA_ASSERT(expr)
    3238  #endif
    3239 #endif
    3240 
    3241 // Assert that will be called very often, like inside data structures e.g. operator[].
    3242 // Making it non-empty can make the program slow.
    3243 #ifndef VMA_HEAVY_ASSERT
    3244  #ifdef _DEBUG
    3245  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3246  #else
    3247  #define VMA_HEAVY_ASSERT(expr)
    3248  #endif
    3249 #endif
    3250 
    3251 #ifndef VMA_ALIGN_OF
    3252  #define VMA_ALIGN_OF(type) (__alignof(type))
    3253 #endif
    3254 
    3255 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3256  #if defined(_WIN32)
    3257  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3258  #else
    3259  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3260  #endif
    3261 #endif
    3262 
    3263 #ifndef VMA_SYSTEM_FREE
    3264  #if defined(_WIN32)
    3265  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3266  #else
    3267  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3268  #endif
    3269 #endif
    3270 
    3271 #ifndef VMA_MIN
    3272  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3273 #endif
    3274 
    3275 #ifndef VMA_MAX
    3276  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3277 #endif
    3278 
    3279 #ifndef VMA_SWAP
    3280  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3281 #endif
    3282 
    3283 #ifndef VMA_SORT
    3284  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3285 #endif
    3286 
    3287 #ifndef VMA_DEBUG_LOG
    3288  #define VMA_DEBUG_LOG(format, ...)
    3289  /*
    3290  #define VMA_DEBUG_LOG(format, ...) do { \
    3291  printf(format, __VA_ARGS__); \
    3292  printf("\n"); \
    3293  } while(false)
    3294  */
    3295 #endif
    3296 
    3297 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3298 #if VMA_STATS_STRING_ENABLED
    3299  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3300  {
    3301  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3302  }
    3303  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3304  {
    3305  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3306  }
    3307  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3308  {
    3309  snprintf(outStr, strLen, "%p", ptr);
    3310  }
    3311 #endif
    3312 
    3313 #ifndef VMA_MUTEX
    3314  class VmaMutex
    3315  {
    3316  public:
    3317  void Lock() { m_Mutex.lock(); }
    3318  void Unlock() { m_Mutex.unlock(); }
    3319  private:
    3320  std::mutex m_Mutex;
    3321  };
    3322  #define VMA_MUTEX VmaMutex
    3323 #endif
    3324 
    3325 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3326 #ifndef VMA_RW_MUTEX
    3327  #if VMA_USE_STL_SHARED_MUTEX
    3328  // Use std::shared_mutex from C++17.
    3329  #include <shared_mutex>
    3330  class VmaRWMutex
    3331  {
    3332  public:
    3333  void LockRead() { m_Mutex.lock_shared(); }
    3334  void UnlockRead() { m_Mutex.unlock_shared(); }
    3335  void LockWrite() { m_Mutex.lock(); }
    3336  void UnlockWrite() { m_Mutex.unlock(); }
    3337  private:
    3338  std::shared_mutex m_Mutex;
    3339  };
    3340  #define VMA_RW_MUTEX VmaRWMutex
    3341  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    3342  // Use SRWLOCK from WinAPI.
    3343  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    3344  class VmaRWMutex
    3345  {
    3346  public:
    3347  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3348  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3349  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3350  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3351  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3352  private:
    3353  SRWLOCK m_Lock;
    3354  };
    3355  #define VMA_RW_MUTEX VmaRWMutex
    3356  #else
    3357  // Less efficient fallback: Use normal mutex.
    3358  class VmaRWMutex
    3359  {
    3360  public:
    3361  void LockRead() { m_Mutex.Lock(); }
    3362  void UnlockRead() { m_Mutex.Unlock(); }
    3363  void LockWrite() { m_Mutex.Lock(); }
    3364  void UnlockWrite() { m_Mutex.Unlock(); }
    3365  private:
    3366  VMA_MUTEX m_Mutex;
    3367  };
    3368  #define VMA_RW_MUTEX VmaRWMutex
    3369  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3370 #endif // #ifndef VMA_RW_MUTEX
    3371 
    3372 /*
    3373 If providing your own implementation, you need to implement a subset of std::atomic:
    3374 
    3375 - Constructor(uint32_t desired)
    3376 - uint32_t load() const
    3377 - void store(uint32_t desired)
    3378 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3379 */
    3380 #ifndef VMA_ATOMIC_UINT32
    3381  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3382 #endif
    3383 
    3384 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3385 
    3389  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3390 #endif
    3391 
    3392 #ifndef VMA_DEBUG_ALIGNMENT
    3393 
    3397  #define VMA_DEBUG_ALIGNMENT (1)
    3398 #endif
    3399 
    3400 #ifndef VMA_DEBUG_MARGIN
    3401 
    3405  #define VMA_DEBUG_MARGIN (0)
    3406 #endif
    3407 
    3408 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3409 
    3413  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3414 #endif
    3415 
    3416 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3417 
    3422  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3423 #endif
    3424 
    3425 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3426 
    3430  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3431 #endif
    3432 
    3433 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3434 
    3438  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3439 #endif
    3440 
    3441 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3442  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3444 #endif
    3445 
    3446 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3447  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3449 #endif
    3450 
    3451 #ifndef VMA_CLASS_NO_COPY
    3452  #define VMA_CLASS_NO_COPY(className) \
    3453  private: \
    3454  className(const className&) = delete; \
    3455  className& operator=(const className&) = delete;
    3456 #endif
    3457 
    3458 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3459 
    3460 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3461 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3462 
    3463 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3464 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3465 
    3466 /*******************************************************************************
    3467 END OF CONFIGURATION
    3468 */
    3469 
    3470 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3471 
    3472 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3473  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3474 
    3475 // Returns number of bits set to 1 in (v).
    3476 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3477 {
    3478  uint32_t c = v - ((v >> 1) & 0x55555555);
    3479  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3480  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3481  c = ((c >> 8) + c) & 0x00FF00FF;
    3482  c = ((c >> 16) + c) & 0x0000FFFF;
    3483  return c;
    3484 }
    3485 
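    // Worked example: VmaCountBitsSet(0x0000000Bu) == 3 (binary 1011 has three set bits).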
    3486 // Aligns the given value up to the nearest multiple of align. For example: VmaAlignUp(11, 8) = 16.
    3487 // Use types like uint32_t, uint64_t as T.
    3488 template <typename T>
    3489 static inline T VmaAlignUp(T val, T align)
    3490 {
    3491  return (val + align - 1) / align * align;
    3492 }
    3493 // Aligns the given value down to the nearest multiple of align. For example: VmaAlignDown(11, 8) = 8.
    3494 // Use types like uint32_t, uint64_t as T.
    3495 template <typename T>
    3496 static inline T VmaAlignDown(T val, T align)
    3497 {
    3498  return val / align * align;
    3499 }
    3500 
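    // Worked examples for the two helpers above:
    //   VmaAlignUp<uint32_t>(11, 8)   == (11 + 7) / 8 * 8 == 16
    //   VmaAlignDown<uint32_t>(11, 8) == 11 / 8 * 8       == 8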
    3501 // Division with mathematical rounding to nearest number.
    3502 template <typename T>
    3503 static inline T VmaRoundDiv(T x, T y)
    3504 {
    3505  return (x + (y / (T)2)) / y;
    3506 }
    3507 
    3508 /*
    3509 Returns true if the given number is a power of two.
    3510 T must be an unsigned integer type, or a signed integer holding a nonnegative value.
    3511 For 0 it returns true.
    3512 */
    3513 template <typename T>
    3514 inline bool VmaIsPow2(T x)
    3515 {
    3516  return (x & (x-1)) == 0;
    3517 }
    3518 
    3519 // Returns the smallest power of 2 greater than or equal to v.
    3520 static inline uint32_t VmaNextPow2(uint32_t v)
    3521 {
    3522  v--;
    3523  v |= v >> 1;
    3524  v |= v >> 2;
    3525  v |= v >> 4;
    3526  v |= v >> 8;
    3527  v |= v >> 16;
    3528  v++;
    3529  return v;
    3530 }
    3531 static inline uint64_t VmaNextPow2(uint64_t v)
    3532 {
    3533  v--;
    3534  v |= v >> 1;
    3535  v |= v >> 2;
    3536  v |= v >> 4;
    3537  v |= v >> 8;
    3538  v |= v >> 16;
    3539  v |= v >> 32;
    3540  v++;
    3541  return v;
    3542 }
    3543 
    3544 // Returns the largest power of 2 less than or equal to v.
    3545 static inline uint32_t VmaPrevPow2(uint32_t v)
    3546 {
    3547  v |= v >> 1;
    3548  v |= v >> 2;
    3549  v |= v >> 4;
    3550  v |= v >> 8;
    3551  v |= v >> 16;
    3552  v = v ^ (v >> 1);
    3553  return v;
    3554 }
    3555 static inline uint64_t VmaPrevPow2(uint64_t v)
    3556 {
    3557  v |= v >> 1;
    3558  v |= v >> 2;
    3559  v |= v >> 4;
    3560  v |= v >> 8;
    3561  v |= v >> 16;
    3562  v |= v >> 32;
    3563  v = v ^ (v >> 1);
    3564  return v;
    3565 }
    3566 
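    // Worked examples for the helpers above:
    //   VmaNextPow2(17u) == 32, VmaNextPow2(32u) == 32
    //   VmaPrevPow2(17u) == 16, VmaPrevPow2(64u) == 64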
    3567 static inline bool VmaStrIsEmpty(const char* pStr)
    3568 {
    3569  return pStr == VMA_NULL || *pStr == '\0';
    3570 }
    3571 
    3572 #if VMA_STATS_STRING_ENABLED
    3573 
    3574 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3575 {
    3576  switch(algorithm)
    3577  {
    3579  return "Linear";
    3581  return "Buddy";
    3582  case 0:
    3583  return "Default";
    3584  default:
    3585  VMA_ASSERT(0);
    3586  return "";
    3587  }
    3588 }
    3589 
    3590 #endif // #if VMA_STATS_STRING_ENABLED
    3591 
    3592 #ifndef VMA_SORT
    3593 
    3594 template<typename Iterator, typename Compare>
    3595 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3596 {
    3597  Iterator centerValue = end; --centerValue;
    3598  Iterator insertIndex = beg;
    3599  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3600  {
    3601  if(cmp(*memTypeIndex, *centerValue))
    3602  {
    3603  if(insertIndex != memTypeIndex)
    3604  {
    3605  VMA_SWAP(*memTypeIndex, *insertIndex);
    3606  }
    3607  ++insertIndex;
    3608  }
    3609  }
    3610  if(insertIndex != centerValue)
    3611  {
    3612  VMA_SWAP(*insertIndex, *centerValue);
    3613  }
    3614  return insertIndex;
    3615 }
    3616 
    3617 template<typename Iterator, typename Compare>
    3618 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3619 {
    3620  if(beg < end)
    3621  {
    3622  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3623  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3624  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3625  }
    3626 }
    3627 
    3628 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3629 
    3630 #endif // #ifndef VMA_SORT
    3631 
    3632 /*
    3633 Returns true if two memory blocks occupy overlapping pages.
    3634 ResourceA must be at a lower memory offset than ResourceB.
    3635 
    3636 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3637 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3638 */
    3639 static inline bool VmaBlocksOnSamePage(
    3640  VkDeviceSize resourceAOffset,
    3641  VkDeviceSize resourceASize,
    3642  VkDeviceSize resourceBOffset,
    3643  VkDeviceSize pageSize)
    3644 {
    3645  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3646  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3647  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3648  VkDeviceSize resourceBStart = resourceBOffset;
    3649  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3650  return resourceAEndPage == resourceBStartPage;
    3651 }
    3652 
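    // Worked examples with pageSize (bufferImageGranularity) = 4096:
    //   VmaBlocksOnSamePage(0, 4000, 4000, 4096) == true  (A ends and B starts on page 0)
    //   VmaBlocksOnSamePage(0, 4096, 4096, 4096) == false (B starts on the next page)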
    3653 enum VmaSuballocationType
    3654 {
    3655  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3656  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3657  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3658  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3659  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3660  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3661  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3662 };
    3663 
    3664 /*
    3665 Returns true if the given suballocation types could conflict and must respect
    3666 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
    3667 or linear image and the other is an optimal image. If the type is unknown, behave
    3668 conservatively.
    3669 */
    3670 static inline bool VmaIsBufferImageGranularityConflict(
    3671  VmaSuballocationType suballocType1,
    3672  VmaSuballocationType suballocType2)
    3673 {
    3674  if(suballocType1 > suballocType2)
    3675  {
    3676  VMA_SWAP(suballocType1, suballocType2);
    3677  }
    3678 
    3679  switch(suballocType1)
    3680  {
    3681  case VMA_SUBALLOCATION_TYPE_FREE:
    3682  return false;
    3683  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3684  return true;
    3685  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3686  return
    3687  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3688  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3689  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3690  return
    3691  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3692  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3693  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3694  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3695  return
    3696  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3697  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3698  return false;
    3699  default:
    3700  VMA_ASSERT(0);
    3701  return true;
    3702  }
    3703 }
    3704 
    3705 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3706 {
    3707  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3708  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3709  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3710  {
    3711  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3712  }
    3713 }
    3714 
    3715 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3716 {
    3717  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3718  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3719  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3720  {
    3721  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3722  {
    3723  return false;
    3724  }
    3725  }
    3726  return true;
    3727 }
    3728 
    3729 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3730 struct VmaMutexLock
    3731 {
    3732  VMA_CLASS_NO_COPY(VmaMutexLock)
    3733 public:
    3734  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
    3735  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3736  { if(m_pMutex) { m_pMutex->Lock(); } }
    3737  ~VmaMutexLock()
    3738  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3739 private:
    3740  VMA_MUTEX* m_pMutex;
    3741 };
    3742 
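    // Usage sketch (illustrative): the lock's duration equals the enclosing scope.
    // {
    //     VmaMutexLock lock(m_SomeMutex); // Lock() runs in the constructor
    //     /* ... touch state guarded by m_SomeMutex ... */
    // } // Unlock() runs in the destructor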
    3743 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3744 struct VmaMutexLockRead
    3745 {
    3746  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3747 public:
    3748  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3749  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3750  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3751  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3752 private:
    3753  VMA_RW_MUTEX* m_pMutex;
    3754 };
    3755 
    3756 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3757 struct VmaMutexLockWrite
    3758 {
    3759  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3760 public:
    3761  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3762  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3763  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3764  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3765 private:
    3766  VMA_RW_MUTEX* m_pMutex;
    3767 };
    3768 
    3769 #if VMA_DEBUG_GLOBAL_MUTEX
    3770  static VMA_MUTEX gDebugGlobalMutex;
    3771  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3772 #else
    3773  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3774 #endif
    3775 
    3776 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3777 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3778 
    3779 /*
    3780 Performs binary search and returns an iterator to the first element that is
    3781 greater than or equal to (key), according to comparison (cmp).
    3782 
    3783 Cmp should return true if its first argument is less than its second argument.
    3784 
    3785 The returned value is the found element, if present in the collection, or the
    3786 place where a new element with value (key) should be inserted.
    3787 */
    3788 template <typename CmpLess, typename IterT, typename KeyT>
    3789 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3790 {
    3791  size_t down = 0, up = (end - beg);
    3792  while(down < up)
    3793  {
    3794  const size_t mid = (down + up) / 2;
    3795  if(cmp(*(beg+mid), key))
    3796  {
    3797  down = mid + 1;
    3798  }
    3799  else
    3800  {
    3801  up = mid;
    3802  }
    3803  }
    3804  return beg + down;
    3805 }
    3806 
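    // Usage sketch (illustrative, not part of the header): lower-bound semantics,
    // analogous to std::lower_bound.
    const uint32_t arr[] = { 1, 3, 3, 7 };
    const uint32_t* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 3u,
        [](uint32_t lhs, uint32_t rhs) { return lhs < rhs; });
    // it == arr + 1 (the first 3); searching for 4u would yield arr + 3 (the 7).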
    3807 /*
    3808 Returns true if all pointers in the array are non-null and unique.
    3809 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3810 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3811 */
    3812 template<typename T>
    3813 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3814 {
    3815  for(uint32_t i = 0; i < count; ++i)
    3816  {
    3817  const T iPtr = arr[i];
    3818  if(iPtr == VMA_NULL)
    3819  {
    3820  return false;
    3821  }
    3822  for(uint32_t j = i + 1; j < count; ++j)
    3823  {
    3824  if(iPtr == arr[j])
    3825  {
    3826  return false;
    3827  }
    3828  }
    3829  }
    3830  return true;
    3831 }
    3832 
    3834 // Memory allocation
    3835 
    3836 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3837 {
    3838  if((pAllocationCallbacks != VMA_NULL) &&
    3839  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3840  {
    3841  return (*pAllocationCallbacks->pfnAllocation)(
    3842  pAllocationCallbacks->pUserData,
    3843  size,
    3844  alignment,
    3845  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3846  }
    3847  else
    3848  {
    3849  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3850  }
    3851 }
    3852 
    3853 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3854 {
    3855  if((pAllocationCallbacks != VMA_NULL) &&
    3856  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3857  {
    3858  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3859  }
    3860  else
    3861  {
    3862  VMA_SYSTEM_FREE(ptr);
    3863  }
    3864 }
    3865 
    3866 template<typename T>
    3867 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3868 {
    3869  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3870 }
    3871 
    3872 template<typename T>
    3873 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3874 {
    3875  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3876 }
    3877 
    3878 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3879 
    3880 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3881 
    3882 template<typename T>
    3883 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3884 {
    3885  ptr->~T();
    3886  VmaFree(pAllocationCallbacks, ptr);
    3887 }
    3888 
    3889 template<typename T>
    3890 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3891 {
    3892  if(ptr != VMA_NULL)
    3893  {
    3894  for(size_t i = count; i--; )
    3895  {
    3896  ptr[i].~T();
    3897  }
    3898  VmaFree(pAllocationCallbacks, ptr);
    3899  }
    3900 }
    3901 
    3902 // STL-compatible allocator.
    3903 template<typename T>
    3904 class VmaStlAllocator
    3905 {
    3906 public:
    3907  const VkAllocationCallbacks* const m_pCallbacks;
    3908  typedef T value_type;
    3909 
    3910  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3911  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3912 
    3913  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3914  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3915 
    3916  template<typename U>
    3917  bool operator==(const VmaStlAllocator<U>& rhs) const
    3918  {
    3919  return m_pCallbacks == rhs.m_pCallbacks;
    3920  }
    3921  template<typename U>
    3922  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3923  {
    3924  return m_pCallbacks != rhs.m_pCallbacks;
    3925  }
    3926 
    3927  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3928 };
    3929 
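    // Usage sketch (illustrative, not part of the header): routing a standard
    // container's storage through VkAllocationCallbacks. Passing VMA_NULL falls
    // back to VMA_SYSTEM_ALIGNED_MALLOC. Requires <vector>.
    VmaStlAllocator<uint32_t> stlAlloc(VMA_NULL);
    std::vector<uint32_t, VmaStlAllocator<uint32_t> > v(stlAlloc);
    v.push_back(42);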
    3930 #if VMA_USE_STL_VECTOR
    3931 
    3932 #define VmaVector std::vector
    3933 
    3934 template<typename T, typename allocatorT>
    3935 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3936 {
    3937  vec.insert(vec.begin() + index, item);
    3938 }
    3939 
    3940 template<typename T, typename allocatorT>
    3941 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3942 {
    3943  vec.erase(vec.begin() + index);
    3944 }
    3945 
    3946 #else // #if VMA_USE_STL_VECTOR
    3947 
    3948 /* Class with an interface compatible with a subset of std::vector.
    3949 T must be POD because constructors and destructors are not called and memcpy is
    3950 used for these objects. */
    3951 template<typename T, typename AllocatorT>
    3952 class VmaVector
    3953 {
    3954 public:
    3955  typedef T value_type;
    3956 
    3957  VmaVector(const AllocatorT& allocator) :
    3958  m_Allocator(allocator),
    3959  m_pArray(VMA_NULL),
    3960  m_Count(0),
    3961  m_Capacity(0)
    3962  {
    3963  }
    3964 
    3965  VmaVector(size_t count, const AllocatorT& allocator) :
    3966  m_Allocator(allocator),
    3967  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3968  m_Count(count),
    3969  m_Capacity(count)
    3970  {
    3971  }
    3972 
    3973  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3974  m_Allocator(src.m_Allocator),
    3975  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3976  m_Count(src.m_Count),
    3977  m_Capacity(src.m_Count)
    3978  {
    3979  if(m_Count != 0)
    3980  {
    3981  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3982  }
    3983  }
    3984 
    3985  ~VmaVector()
    3986  {
    3987  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3988  }
    3989 
    3990  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3991  {
    3992  if(&rhs != this)
    3993  {
    3994  resize(rhs.m_Count);
    3995  if(m_Count != 0)
    3996  {
    3997  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    3998  }
    3999  }
    4000  return *this;
    4001  }
    4002 
    4003  bool empty() const { return m_Count == 0; }
    4004  size_t size() const { return m_Count; }
    4005  T* data() { return m_pArray; }
    4006  const T* data() const { return m_pArray; }
    4007 
    4008  T& operator[](size_t index)
    4009  {
    4010  VMA_HEAVY_ASSERT(index < m_Count);
    4011  return m_pArray[index];
    4012  }
    4013  const T& operator[](size_t index) const
    4014  {
    4015  VMA_HEAVY_ASSERT(index < m_Count);
    4016  return m_pArray[index];
    4017  }
    4018 
    4019  T& front()
    4020  {
    4021  VMA_HEAVY_ASSERT(m_Count > 0);
    4022  return m_pArray[0];
    4023  }
    4024  const T& front() const
    4025  {
    4026  VMA_HEAVY_ASSERT(m_Count > 0);
    4027  return m_pArray[0];
    4028  }
    4029  T& back()
    4030  {
    4031  VMA_HEAVY_ASSERT(m_Count > 0);
    4032  return m_pArray[m_Count - 1];
    4033  }
    4034  const T& back() const
    4035  {
    4036  VMA_HEAVY_ASSERT(m_Count > 0);
    4037  return m_pArray[m_Count - 1];
    4038  }
    4039 
    4040  void reserve(size_t newCapacity, bool freeMemory = false)
    4041  {
    4042  newCapacity = VMA_MAX(newCapacity, m_Count);
    4043 
    4044  if((newCapacity < m_Capacity) && !freeMemory)
    4045  {
    4046  newCapacity = m_Capacity;
    4047  }
    4048 
    4049  if(newCapacity != m_Capacity)
    4050  {
    4051  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    4052  if(m_Count != 0)
    4053  {
    4054  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4055  }
    4056  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4057  m_Capacity = newCapacity;
    4058  m_pArray = newArray;
    4059  }
    4060  }
    4061 
    4062  void resize(size_t newCount, bool freeMemory = false)
    4063  {
    4064  size_t newCapacity = m_Capacity;
    4065  if(newCount > m_Capacity)
    4066  {
    4067  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4068  }
    4069  else if(freeMemory)
    4070  {
    4071  newCapacity = newCount;
    4072  }
    4073 
    4074  if(newCapacity != m_Capacity)
    4075  {
    4076  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4077  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4078  if(elementsToCopy != 0)
    4079  {
    4080  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4081  }
    4082  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4083  m_Capacity = newCapacity;
    4084  m_pArray = newArray;
    4085  }
    4086 
    4087  m_Count = newCount;
    4088  }
    4089 
    4090  void clear(bool freeMemory = false)
    4091  {
    4092  resize(0, freeMemory);
    4093  }
    4094 
    4095  void insert(size_t index, const T& src)
    4096  {
    4097  VMA_HEAVY_ASSERT(index <= m_Count);
    4098  const size_t oldCount = size();
    4099  resize(oldCount + 1);
    4100  if(index < oldCount)
    4101  {
    4102  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4103  }
    4104  m_pArray[index] = src;
    4105  }
    4106 
    4107  void remove(size_t index)
    4108  {
    4109  VMA_HEAVY_ASSERT(index < m_Count);
    4110  const size_t oldCount = size();
    4111  if(index < oldCount - 1)
    4112  {
    4113  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4114  }
    4115  resize(oldCount - 1);
    4116  }
    4117 
    4118  void push_back(const T& src)
    4119  {
    4120  const size_t newIndex = size();
    4121  resize(newIndex + 1);
    4122  m_pArray[newIndex] = src;
    4123  }
    4124 
    4125  void pop_back()
    4126  {
    4127  VMA_HEAVY_ASSERT(m_Count > 0);
    4128  resize(size() - 1);
    4129  }
    4130 
    4131  void push_front(const T& src)
    4132  {
    4133  insert(0, src);
    4134  }
    4135 
    4136  void pop_front()
    4137  {
    4138  VMA_HEAVY_ASSERT(m_Count > 0);
    4139  remove(0);
    4140  }
    4141 
    4142  typedef T* iterator;
    4143 
    4144  iterator begin() { return m_pArray; }
    4145  iterator end() { return m_pArray + m_Count; }
    4146 
    4147 private:
    4148  AllocatorT m_Allocator;
    4149  T* m_pArray;
    4150  size_t m_Count;
    4151  size_t m_Capacity;
    4152 };
    4153 
    4154 template<typename T, typename allocatorT>
    4155 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    4156 {
    4157  vec.insert(index, item);
    4158 }
    4159 
    4160 template<typename T, typename allocatorT>
    4161 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    4162 {
    4163  vec.remove(index);
    4164 }
    4165 
    4166 #endif // #if VMA_USE_STL_VECTOR
    4167 
    4168 template<typename CmpLess, typename VectorT>
    4169 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4170 {
    4171  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4172  vector.data(),
    4173  vector.data() + vector.size(),
    4174  value,
    4175  CmpLess()) - vector.data();
    4176  VmaVectorInsert(vector, indexToInsert, value);
    4177  return indexToInsert;
    4178 }
    4179 
    4180 template<typename CmpLess, typename VectorT>
    4181 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4182 {
    4183  CmpLess comparator;
    4184  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4185  vector.begin(),
    4186  vector.end(),
    4187  value,
    4188  comparator);
    4189  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4190  {
    4191  size_t indexToRemove = it - vector.begin();
    4192  VmaVectorRemove(vector, indexToRemove);
    4193  return true;
    4194  }
    4195  return false;
    4196 }
    4197 
    4198 template<typename CmpLess, typename IterT, typename KeyT>
    4199 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    4200 {
    4201  CmpLess comparator;
    4202  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    4203  beg, end, value, comparator);
    4204  if(it == end ||
    4205  (!comparator(*it, value) && !comparator(value, *it)))
    4206  {
    4207  return it;
    4208  }
    4209  return end;
    4210 }
    4211 
    4213 // class VmaPoolAllocator
    4214 
    4215 /*
    4216 Allocator for objects of type T using a list of arrays (pools) to speed up
    4217 allocation. The number of elements that can be allocated is not bounded, because
    4218 the allocator can create multiple blocks.
    4219 */
    4220 template<typename T>
    4221 class VmaPoolAllocator
    4222 {
    4223  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4224 public:
    4225  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    4226  ~VmaPoolAllocator();
    4227  void Clear();
    4228  T* Alloc();
    4229  void Free(T* ptr);
    4230 
    4231 private:
    4232  union Item
    4233  {
    4234  uint32_t NextFreeIndex;
    4235  T Value;
    4236  };
    4237 
    4238  struct ItemBlock
    4239  {
    4240  Item* pItems;
    4241  uint32_t Capacity;
    4242  uint32_t FirstFreeIndex;
    4243  };
    4244 
    4245  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4246  const uint32_t m_FirstBlockCapacity;
    4247  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4248 
    4249  ItemBlock& CreateNewBlock();
    4250 };
    4251 
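    // Usage sketch (illustrative, not part of the header): a fixed-size item pool.
    // Alloc() returns raw storage from the Item union without running T's
    // constructor, so non-trivial T must be constructed in place by the caller.
    VmaPoolAllocator<uint64_t> pool(VMA_NULL, 32); // first block holds 32 items
    uint64_t* item = pool.Alloc();
    *item = 123;
    pool.Free(item);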
    4252 template<typename T>
    4253 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    4254  m_pAllocationCallbacks(pAllocationCallbacks),
    4255  m_FirstBlockCapacity(firstBlockCapacity),
    4256  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    4257 {
    4258  VMA_ASSERT(m_FirstBlockCapacity > 1);
    4259 }
    4260 
    4261 template<typename T>
    4262 VmaPoolAllocator<T>::~VmaPoolAllocator()
    4263 {
    4264  Clear();
    4265 }
    4266 
    4267 template<typename T>
    4268 void VmaPoolAllocator<T>::Clear()
    4269 {
    4270  for(size_t i = m_ItemBlocks.size(); i--; )
    4271  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    4272  m_ItemBlocks.clear();
    4273 }
    4274 
    4275 template<typename T>
    4276 T* VmaPoolAllocator<T>::Alloc()
    4277 {
    4278  for(size_t i = m_ItemBlocks.size(); i--; )
    4279  {
    4280  ItemBlock& block = m_ItemBlocks[i];
    4281  // This block has some free items: Use first one.
    4282  if(block.FirstFreeIndex != UINT32_MAX)
    4283  {
    4284  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4285  block.FirstFreeIndex = pItem->NextFreeIndex;
    4286  return &pItem->Value;
    4287  }
    4288  }
    4289 
    4290  // No block has free item: Create new one and use it.
    4291  ItemBlock& newBlock = CreateNewBlock();
    4292  Item* const pItem = &newBlock.pItems[0];
    4293  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4294  return &pItem->Value;
    4295 }
    4296 
    4297 template<typename T>
    4298 void VmaPoolAllocator<T>::Free(T* ptr)
    4299 {
    4300  // Search all memory blocks to find ptr.
    4301  for(size_t i = m_ItemBlocks.size(); i--; )
    4302  {
    4303  ItemBlock& block = m_ItemBlocks[i];
    4304 
    4305  // Casting to union.
    4306  Item* pItemPtr;
    4307  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4308 
    4309  // Check if pItemPtr is in address range of this block.
    4310  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
    4311  {
    4312  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4313  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4314  block.FirstFreeIndex = index;
    4315  return;
    4316  }
    4317  }
    4318  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4319 }
    4320 
    4321 template<typename T>
    4322 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4323 {
    4324  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
    4325  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
    4326 
    4327  const ItemBlock newBlock = {
    4328  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
    4329  newBlockCapacity,
    4330  0 };
    4331 
    4332  m_ItemBlocks.push_back(newBlock);
    4333 
    4334  // Set up a singly-linked list of all free items in this block.
    4335  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
    4336  newBlock.pItems[i].NextFreeIndex = i + 1;
    4337  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    4338  return m_ItemBlocks.back();
    4339 }
    4340 
    4342 // class VmaRawList, VmaList
    4343 
    4344 #if VMA_USE_STL_LIST
    4345 
    4346 #define VmaList std::list
    4347 
    4348 #else // #if VMA_USE_STL_LIST
    4349 
    4350 template<typename T>
    4351 struct VmaListItem
    4352 {
    4353  VmaListItem* pPrev;
    4354  VmaListItem* pNext;
    4355  T Value;
    4356 };
    4357 
    4358 // Doubly linked list.
    4359 template<typename T>
    4360 class VmaRawList
    4361 {
    4362  VMA_CLASS_NO_COPY(VmaRawList)
    4363 public:
    4364  typedef VmaListItem<T> ItemType;
    4365 
    4366  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4367  ~VmaRawList();
    4368  void Clear();
    4369 
    4370  size_t GetCount() const { return m_Count; }
    4371  bool IsEmpty() const { return m_Count == 0; }
    4372 
    4373  ItemType* Front() { return m_pFront; }
    4374  const ItemType* Front() const { return m_pFront; }
    4375  ItemType* Back() { return m_pBack; }
    4376  const ItemType* Back() const { return m_pBack; }
    4377 
    4378  ItemType* PushBack();
    4379  ItemType* PushFront();
    4380  ItemType* PushBack(const T& value);
    4381  ItemType* PushFront(const T& value);
    4382  void PopBack();
    4383  void PopFront();
    4384 
    4385  // pItem can be null - that means PushBack.
    4386  ItemType* InsertBefore(ItemType* pItem);
    4387  // pItem can be null - that means PushFront.
    4388  ItemType* InsertAfter(ItemType* pItem);
    4389 
    4390  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4391  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4392 
    4393  void Remove(ItemType* pItem);
    4394 
    4395 private:
    4396  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4397  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4398  ItemType* m_pFront;
    4399  ItemType* m_pBack;
    4400  size_t m_Count;
    4401 };
    4402 
    4403 template<typename T>
    4404 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    4405  m_pAllocationCallbacks(pAllocationCallbacks),
    4406  m_ItemAllocator(pAllocationCallbacks, 128),
    4407  m_pFront(VMA_NULL),
    4408  m_pBack(VMA_NULL),
    4409  m_Count(0)
    4410 {
    4411 }
    4412 
    4413 template<typename T>
    4414 VmaRawList<T>::~VmaRawList()
    4415 {
    4416  // Intentionally not calling Clear: that would waste computation returning
    4417  // every item to m_ItemAllocator as free just before it is destroyed anyway.
    4418 }
    4419 
    4420 template<typename T>
    4421 void VmaRawList<T>::Clear()
    4422 {
    4423  if(IsEmpty() == false)
    4424  {
    4425  ItemType* pItem = m_pBack;
    4426  while(pItem != VMA_NULL)
    4427  {
    4428  ItemType* const pPrevItem = pItem->pPrev;
    4429  m_ItemAllocator.Free(pItem);
    4430  pItem = pPrevItem;
    4431  }
    4432  m_pFront = VMA_NULL;
    4433  m_pBack = VMA_NULL;
    4434  m_Count = 0;
    4435  }
    4436 }
    4437 
    4438 template<typename T>
    4439 VmaListItem<T>* VmaRawList<T>::PushBack()
    4440 {
    4441  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4442  pNewItem->pNext = VMA_NULL;
    4443  if(IsEmpty())
    4444  {
    4445  pNewItem->pPrev = VMA_NULL;
    4446  m_pFront = pNewItem;
    4447  m_pBack = pNewItem;
    4448  m_Count = 1;
    4449  }
    4450  else
    4451  {
    4452  pNewItem->pPrev = m_pBack;
    4453  m_pBack->pNext = pNewItem;
    4454  m_pBack = pNewItem;
    4455  ++m_Count;
    4456  }
    4457  return pNewItem;
    4458 }
    4459 
    4460 template<typename T>
    4461 VmaListItem<T>* VmaRawList<T>::PushFront()
    4462 {
    4463  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4464  pNewItem->pPrev = VMA_NULL;
    4465  if(IsEmpty())
    4466  {
    4467  pNewItem->pNext = VMA_NULL;
    4468  m_pFront = pNewItem;
    4469  m_pBack = pNewItem;
    4470  m_Count = 1;
    4471  }
    4472  else
    4473  {
    4474  pNewItem->pNext = m_pFront;
    4475  m_pFront->pPrev = pNewItem;
    4476  m_pFront = pNewItem;
    4477  ++m_Count;
    4478  }
    4479  return pNewItem;
    4480 }
    4481 
    4482 template<typename T>
    4483 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4484 {
    4485  ItemType* const pNewItem = PushBack();
    4486  pNewItem->Value = value;
    4487  return pNewItem;
    4488 }
    4489 
    4490 template<typename T>
    4491 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4492 {
    4493  ItemType* const pNewItem = PushFront();
    4494  pNewItem->Value = value;
    4495  return pNewItem;
    4496 }
    4497 
    4498 template<typename T>
    4499 void VmaRawList<T>::PopBack()
    4500 {
    4501  VMA_HEAVY_ASSERT(m_Count > 0);
    4502  ItemType* const pBackItem = m_pBack;
    4503  ItemType* const pPrevItem = pBackItem->pPrev;
    4504  if(pPrevItem != VMA_NULL)
    4505  {
    4506  pPrevItem->pNext = VMA_NULL;
    4507  }
    4508  m_pBack = pPrevItem;
    4509  m_ItemAllocator.Free(pBackItem);
    4510  --m_Count;
    4511 }
    4512 
    4513 template<typename T>
    4514 void VmaRawList<T>::PopFront()
    4515 {
    4516  VMA_HEAVY_ASSERT(m_Count > 0);
    4517  ItemType* const pFrontItem = m_pFront;
    4518  ItemType* const pNextItem = pFrontItem->pNext;
    4519  if(pNextItem != VMA_NULL)
    4520  {
    4521  pNextItem->pPrev = VMA_NULL;
    4522  }
    4523  m_pFront = pNextItem;
    4524  m_ItemAllocator.Free(pFrontItem);
    4525  --m_Count;
    4526 }
    4527 
    4528 template<typename T>
    4529 void VmaRawList<T>::Remove(ItemType* pItem)
    4530 {
    4531  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4532  VMA_HEAVY_ASSERT(m_Count > 0);
    4533 
    4534  if(pItem->pPrev != VMA_NULL)
    4535  {
    4536  pItem->pPrev->pNext = pItem->pNext;
    4537  }
    4538  else
    4539  {
    4540  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4541  m_pFront = pItem->pNext;
    4542  }
    4543 
    4544  if(pItem->pNext != VMA_NULL)
    4545  {
    4546  pItem->pNext->pPrev = pItem->pPrev;
    4547  }
    4548  else
    4549  {
    4550  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4551  m_pBack = pItem->pPrev;
    4552  }
    4553 
    4554  m_ItemAllocator.Free(pItem);
    4555  --m_Count;
    4556 }
    4557 
    4558 template<typename T>
    4559 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4560 {
    4561  if(pItem != VMA_NULL)
    4562  {
    4563  ItemType* const prevItem = pItem->pPrev;
    4564  ItemType* const newItem = m_ItemAllocator.Alloc();
    4565  newItem->pPrev = prevItem;
    4566  newItem->pNext = pItem;
    4567  pItem->pPrev = newItem;
    4568  if(prevItem != VMA_NULL)
    4569  {
    4570  prevItem->pNext = newItem;
    4571  }
    4572  else
    4573  {
    4574  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4575  m_pFront = newItem;
    4576  }
    4577  ++m_Count;
    4578  return newItem;
    4579  }
    4580  else
    4581  return PushBack();
    4582 }
    4583 
    4584 template<typename T>
    4585 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4586 {
    4587  if(pItem != VMA_NULL)
    4588  {
    4589  ItemType* const nextItem = pItem->pNext;
    4590  ItemType* const newItem = m_ItemAllocator.Alloc();
    4591  newItem->pNext = nextItem;
    4592  newItem->pPrev = pItem;
    4593  pItem->pNext = newItem;
    4594  if(nextItem != VMA_NULL)
    4595  {
    4596  nextItem->pPrev = newItem;
    4597  }
    4598  else
    4599  {
    4600  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4601  m_pBack = newItem;
    4602  }
    4603  ++m_Count;
    4604  return newItem;
    4605  }
    4606  else
    4607  return PushFront();
    4608 }
    4609 
    4610 template<typename T>
    4611 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4612 {
    4613  ItemType* const newItem = InsertBefore(pItem);
    4614  newItem->Value = value;
    4615  return newItem;
    4616 }
    4617 
    4618 template<typename T>
    4619 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4620 {
    4621  ItemType* const newItem = InsertAfter(pItem);
    4622  newItem->Value = value;
    4623  return newItem;
    4624 }
    4625 
    4626 template<typename T, typename AllocatorT>
    4627 class VmaList
    4628 {
    4629  VMA_CLASS_NO_COPY(VmaList)
    4630 public:
    4631  class iterator
    4632  {
    4633  public:
    4634  iterator() :
    4635  m_pList(VMA_NULL),
    4636  m_pItem(VMA_NULL)
    4637  {
    4638  }
    4639 
    4640  T& operator*() const
    4641  {
    4642  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4643  return m_pItem->Value;
    4644  }
    4645  T* operator->() const
    4646  {
    4647  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4648  return &m_pItem->Value;
    4649  }
    4650 
    4651  iterator& operator++()
    4652  {
    4653  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4654  m_pItem = m_pItem->pNext;
    4655  return *this;
    4656  }
    4657  iterator& operator--()
    4658  {
    4659  if(m_pItem != VMA_NULL)
    4660  {
    4661  m_pItem = m_pItem->pPrev;
    4662  }
    4663  else
    4664  {
    4665  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4666  m_pItem = m_pList->Back();
    4667  }
    4668  return *this;
    4669  }
    4670 
    4671  iterator operator++(int)
    4672  {
    4673  iterator result = *this;
    4674  ++*this;
    4675  return result;
    4676  }
    4677  iterator operator--(int)
    4678  {
    4679  iterator result = *this;
    4680  --*this;
    4681  return result;
    4682  }
    4683 
    4684  bool operator==(const iterator& rhs) const
    4685  {
    4686  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4687  return m_pItem == rhs.m_pItem;
    4688  }
    4689  bool operator!=(const iterator& rhs) const
    4690  {
    4691  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4692  return m_pItem != rhs.m_pItem;
    4693  }
    4694 
    4695  private:
    4696  VmaRawList<T>* m_pList;
    4697  VmaListItem<T>* m_pItem;
    4698 
    4699  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4700  m_pList(pList),
    4701  m_pItem(pItem)
    4702  {
    4703  }
    4704 
    4705  friend class VmaList<T, AllocatorT>;
    4706  };
    4707 
    4708  class const_iterator
    4709  {
    4710  public:
    4711  const_iterator() :
    4712  m_pList(VMA_NULL),
    4713  m_pItem(VMA_NULL)
    4714  {
    4715  }
    4716 
    4717  const_iterator(const iterator& src) :
    4718  m_pList(src.m_pList),
    4719  m_pItem(src.m_pItem)
    4720  {
    4721  }
    4722 
    4723  const T& operator*() const
    4724  {
    4725  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4726  return m_pItem->Value;
    4727  }
    4728  const T* operator->() const
    4729  {
    4730  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4731  return &m_pItem->Value;
    4732  }
    4733 
    4734  const_iterator& operator++()
    4735  {
    4736  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4737  m_pItem = m_pItem->pNext;
    4738  return *this;
    4739  }
    4740  const_iterator& operator--()
    4741  {
    4742  if(m_pItem != VMA_NULL)
    4743  {
    4744  m_pItem = m_pItem->pPrev;
    4745  }
    4746  else
    4747  {
    4748  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4749  m_pItem = m_pList->Back();
    4750  }
    4751  return *this;
    4752  }
    4753 
    4754  const_iterator operator++(int)
    4755  {
    4756  const_iterator result = *this;
    4757  ++*this;
    4758  return result;
    4759  }
    4760  const_iterator operator--(int)
    4761  {
    4762  const_iterator result = *this;
    4763  --*this;
    4764  return result;
    4765  }
    4766 
    4767  bool operator==(const const_iterator& rhs) const
    4768  {
    4769  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4770  return m_pItem == rhs.m_pItem;
    4771  }
    4772  bool operator!=(const const_iterator& rhs) const
    4773  {
    4774  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4775  return m_pItem != rhs.m_pItem;
    4776  }
    4777 
    4778  private:
    4779  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4780  m_pList(pList),
    4781  m_pItem(pItem)
    4782  {
    4783  }
    4784 
    4785  const VmaRawList<T>* m_pList;
    4786  const VmaListItem<T>* m_pItem;
    4787 
    4788  friend class VmaList<T, AllocatorT>;
    4789  };
    4790 
    4791  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4792 
    4793  bool empty() const { return m_RawList.IsEmpty(); }
    4794  size_t size() const { return m_RawList.GetCount(); }
    4795 
    4796  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4797  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4798 
    4799  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4800  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4801 
    4802  void clear() { m_RawList.Clear(); }
    4803  void push_back(const T& value) { m_RawList.PushBack(value); }
    4804  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4805  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4806 
    4807 private:
    4808  VmaRawList<T> m_RawList;
    4809 };
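// A minimal usage sketch of VmaList (illustration only). It assumes that
// VmaStlAllocator<T> is constructible from a VkAllocationCallbacks pointer and
// exposes the m_pCallbacks member used by the VmaList constructor above.
#if 0
void ListSketch(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList<int, VmaStlAllocator<int> > IntList;
    VmaStlAllocator<int> alloc(pCallbacks);
    IntList list(alloc);

    list.push_back(10);
    list.push_back(20);
    list.push_back(30);

    IntList::iterator it = list.begin();
    ++it;            // Now at the element holding 20.
    list.erase(it);  // Unlinks the node and returns it to the internal pool.

    for(it = list.begin(); it != list.end(); ++it)
    {
        // Visits 10, then 30.
    }
}
#endif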
    4810 
    4811 #endif // #if VMA_USE_STL_LIST
    4812 
    4814 // class VmaMap
    4815 
    4816 // Unused in this version.
    4817 #if 0
    4818 
    4819 #if VMA_USE_STL_UNORDERED_MAP
    4820 
    4821 #define VmaPair std::pair
    4822 
    4823 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4824  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4825 
    4826 #else // #if VMA_USE_STL_UNORDERED_MAP
    4827 
    4828 template<typename T1, typename T2>
    4829 struct VmaPair
    4830 {
    4831  T1 first;
    4832  T2 second;
    4833 
    4834  VmaPair() : first(), second() { }
    4835  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4836 };
    4837 
    4838 /* Class compatible with a subset of the std::unordered_map interface.
    4839 KeyT and ValueT must be POD because they are stored in a VmaVector.
    4840 */
    4841 template<typename KeyT, typename ValueT>
    4842 class VmaMap
    4843 {
    4844 public:
    4845  typedef VmaPair<KeyT, ValueT> PairType;
    4846  typedef PairType* iterator;
    4847 
    4848  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4849 
    4850  iterator begin() { return m_Vector.begin(); }
    4851  iterator end() { return m_Vector.end(); }
    4852 
    4853  void insert(const PairType& pair);
    4854  iterator find(const KeyT& key);
    4855  void erase(iterator it);
    4856 
    4857 private:
    4858  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4859 };
    4860 
    4861 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4862 
    4863 template<typename FirstT, typename SecondT>
    4864 struct VmaPairFirstLess
    4865 {
    4866  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4867  {
    4868  return lhs.first < rhs.first;
    4869  }
    4870  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4871  {
    4872  return lhs.first < rhsFirst;
    4873  }
    4874 };
    4875 
    4876 template<typename KeyT, typename ValueT>
    4877 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4878 {
    4879  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4880  m_Vector.data(),
    4881  m_Vector.data() + m_Vector.size(),
    4882  pair,
    4883  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4884  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4885 }
    4886 
    4887 template<typename KeyT, typename ValueT>
    4888 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4889 {
    4890  PairType* it = VmaBinaryFindFirstNotLess(
    4891  m_Vector.data(),
    4892  m_Vector.data() + m_Vector.size(),
    4893  key,
    4894  VmaPairFirstLess<KeyT, ValueT>());
    4895  if((it != m_Vector.end()) && (it->first == key))
    4896  {
    4897  return it;
    4898  }
    4899  else
    4900  {
    4901  return m_Vector.end();
    4902  }
    4903 }
    4904 
    4905 template<typename KeyT, typename ValueT>
    4906 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4907 {
    4908  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4909 }
    4910 
    4911 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4912 
    4913 #endif // #if 0
    4914 
    4916 
    4917 class VmaDeviceMemoryBlock;
    4918 
    4919 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4920 
    4921 struct VmaAllocation_T
    4922 {
    4923 private:
    4924  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4925 
    4926  enum FLAGS
    4927  {
    4928  FLAG_USER_DATA_STRING = 0x01,
    4929  };
    4930 
    4931 public:
    4932  enum ALLOCATION_TYPE
    4933  {
    4934  ALLOCATION_TYPE_NONE,
    4935  ALLOCATION_TYPE_BLOCK,
    4936  ALLOCATION_TYPE_DEDICATED,
    4937  };
    4938 
    4939  /*
    4940  This struct cannot have a constructor or destructor. It must be POD because
    4941  it is allocated using VmaPoolAllocator.
    4942  */
    4943 
    4944  void Ctor(uint32_t currentFrameIndex, bool userDataString)
    4945  {
    4946  m_Alignment = 1;
    4947  m_Size = 0;
    4948  m_pUserData = VMA_NULL;
    4949  m_LastUseFrameIndex = currentFrameIndex;
    4950  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
    4951  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
    4952  m_MapCount = 0;
    4953  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
    4954 
    4955 #if VMA_STATS_STRING_ENABLED
    4956  m_CreationFrameIndex = currentFrameIndex;
    4957  m_BufferImageUsage = 0;
    4958 #endif
    4959  }
    4960 
    4961  void Dtor()
    4962  {
    4963  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    4964 
    4965  // Check if owned string was freed.
    4966  VMA_ASSERT(m_pUserData == VMA_NULL);
    4967  }
    4968 
    4969  void InitBlockAllocation(
    4970  VmaDeviceMemoryBlock* block,
    4971  VkDeviceSize offset,
    4972  VkDeviceSize alignment,
    4973  VkDeviceSize size,
    4974  VmaSuballocationType suballocationType,
    4975  bool mapped,
    4976  bool canBecomeLost)
    4977  {
    4978  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4979  VMA_ASSERT(block != VMA_NULL);
    4980  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4981  m_Alignment = alignment;
    4982  m_Size = size;
    4983  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    4984  m_SuballocationType = (uint8_t)suballocationType;
    4985  m_BlockAllocation.m_Block = block;
    4986  m_BlockAllocation.m_Offset = offset;
    4987  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    4988  }
    4989 
    4990  void InitLost()
    4991  {
    4992  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4993  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    4994  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4995  m_BlockAllocation.m_Block = VMA_NULL;
    4996  m_BlockAllocation.m_Offset = 0;
    4997  m_BlockAllocation.m_CanBecomeLost = true;
    4998  }
    4999 
    5000  void ChangeBlockAllocation(
    5001  VmaAllocator hAllocator,
    5002  VmaDeviceMemoryBlock* block,
    5003  VkDeviceSize offset);
    5004 
    5005  void ChangeSize(VkDeviceSize newSize);
    5006  void ChangeOffset(VkDeviceSize newOffset);
    5007 
    5008  // A non-null pMappedData means the allocation is created with the MAPPED flag.
    5009  void InitDedicatedAllocation(
    5010  uint32_t memoryTypeIndex,
    5011  VkDeviceMemory hMemory,
    5012  VmaSuballocationType suballocationType,
    5013  void* pMappedData,
    5014  VkDeviceSize size)
    5015  {
    5016  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5017  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    5018  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    5019  m_Alignment = 0;
    5020  m_Size = size;
    5021  m_SuballocationType = (uint8_t)suballocationType;
    5022  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5023  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    5024  m_DedicatedAllocation.m_hMemory = hMemory;
    5025  m_DedicatedAllocation.m_pMappedData = pMappedData;
    5026  }
    5027 
    5028  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    5029  VkDeviceSize GetAlignment() const { return m_Alignment; }
    5030  VkDeviceSize GetSize() const { return m_Size; }
    5031  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    5032  void* GetUserData() const { return m_pUserData; }
    5033  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    5034  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    5035 
    5036  VmaDeviceMemoryBlock* GetBlock() const
    5037  {
    5038  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5039  return m_BlockAllocation.m_Block;
    5040  }
    5041  VkDeviceSize GetOffset() const;
    5042  VkDeviceMemory GetMemory() const;
    5043  uint32_t GetMemoryTypeIndex() const;
    5044  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    5045  void* GetMappedData() const;
    5046  bool CanBecomeLost() const;
    5047 
    5048  uint32_t GetLastUseFrameIndex() const
    5049  {
    5050  return m_LastUseFrameIndex.load();
    5051  }
    5052  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    5053  {
    5054  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    5055  }
    5056  /*
    5057  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5058  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5059  - Else, returns false.
    5060 
    5061  If hAllocation is already lost, this asserts - you should not call it then.
    5062  If hAllocation was not created with CAN_BECOME_LOST_BIT, this asserts as well.
    5063  */
    5064  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5065 
    5066  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5067  {
    5068  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5069  outInfo.blockCount = 1;
    5070  outInfo.allocationCount = 1;
    5071  outInfo.unusedRangeCount = 0;
    5072  outInfo.usedBytes = m_Size;
    5073  outInfo.unusedBytes = 0;
    5074  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5075  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5076  outInfo.unusedRangeSizeMax = 0;
    5077  }
    5078 
    5079  void BlockAllocMap();
    5080  void BlockAllocUnmap();
    5081  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    5082  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    5083 
    5084 #if VMA_STATS_STRING_ENABLED
    5085  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    5086  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    5087 
    5088  void InitBufferImageUsage(uint32_t bufferImageUsage)
    5089  {
    5090  VMA_ASSERT(m_BufferImageUsage == 0);
    5091  m_BufferImageUsage = bufferImageUsage;
    5092  }
    5093 
    5094  void PrintParameters(class VmaJsonWriter& json) const;
    5095 #endif
    5096 
    5097 private:
    5098  VkDeviceSize m_Alignment;
    5099  VkDeviceSize m_Size;
    5100  void* m_pUserData;
    5101  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    5102  uint8_t m_Type; // ALLOCATION_TYPE
    5103  uint8_t m_SuballocationType; // VmaSuballocationType
    5104  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5105  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    5106  uint8_t m_MapCount;
    5107  uint8_t m_Flags; // enum FLAGS
    5108 
    5109  // Allocation out of VmaDeviceMemoryBlock.
    5110  struct BlockAllocation
    5111  {
    5112  VmaDeviceMemoryBlock* m_Block;
    5113  VkDeviceSize m_Offset;
    5114  bool m_CanBecomeLost;
    5115  };
    5116 
    5117  // Allocation for an object that has its own private VkDeviceMemory.
    5118  struct DedicatedAllocation
    5119  {
    5120  uint32_t m_MemoryTypeIndex;
    5121  VkDeviceMemory m_hMemory;
    5122  void* m_pMappedData; // Not null means memory is mapped.
    5123  };
    5124 
    5125  union
    5126  {
    5127  // Allocation out of VmaDeviceMemoryBlock.
    5128  BlockAllocation m_BlockAllocation;
    5129  // Allocation for an object that has its own private VkDeviceMemory.
    5130  DedicatedAllocation m_DedicatedAllocation;
    5131  };
    5132 
    5133 #if VMA_STATS_STRING_ENABLED
    5134  uint32_t m_CreationFrameIndex;
    5135  uint32_t m_BufferImageUsage; // 0 if unknown.
    5136 #endif
    5137 
    5138  void FreeUserDataString(VmaAllocator hAllocator);
    5139 };
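// For intuition, how the m_MapCount byte above packs a flag and a counter
// (illustration only, using the literal values from the member's comments):
#if 0
uint8_t mapCount = 0x80;                  // Created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
++mapCount;                               // vmaMapMemory(): reference counter 0 -> 1.
bool persistent = (mapCount & 0x80) != 0; // true - persistently mapped.
uint32_t refs   = mapCount & 0x7F;        // 1 - one explicit map outstanding.
--mapCount;                               // vmaUnmapMemory(): reference counter 1 -> 0.
#endif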
    5140 
    5141 /*
    5142 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
    5143 returned as an allocated memory block, or free.
    5144 */
    5145 struct VmaSuballocation
    5146 {
    5147  VkDeviceSize offset;
    5148  VkDeviceSize size;
    5149  VmaAllocation hAllocation;
    5150  VmaSuballocationType type;
    5151 };
    5152 
    5153 // Comparators ordering suballocations by offset.
    5154 struct VmaSuballocationOffsetLess
    5155 {
    5156  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5157  {
    5158  return lhs.offset < rhs.offset;
    5159  }
    5160 };
    5161 struct VmaSuballocationOffsetGreater
    5162 {
    5163  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5164  {
    5165  return lhs.offset > rhs.offset;
    5166  }
    5167 };
    5168 
    5169 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    5170 
    5171 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
    5172 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5173 
    5174 enum class VmaAllocationRequestType
    5175 {
    5176  Normal,
    5177  // Used by "Linear" algorithm.
    5178  UpperAddress,
    5179  EndOf1st,
    5180  EndOf2nd,
    5181 };
    5182 
    5183 /*
    5184 Parameters of a planned allocation inside a VmaDeviceMemoryBlock.
    5185 
    5186 If canMakeOtherLost was false:
    5187 - item points to a FREE suballocation.
    5188 - itemsToMakeLostCount is 0.
    5189 
    5190 If canMakeOtherLost was true:
    5191 - item points to the first of a sequence of suballocations, each of which is
    5192  either FREE or points to a VmaAllocation that can become lost.
    5193 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5194  the requested allocation to succeed.
    5195 */
    5196 struct VmaAllocationRequest
    5197 {
    5198  VkDeviceSize offset;
    5199  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with the proposed allocation.
    5200  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with the proposed allocation.
    5201  VmaSuballocationList::iterator item;
    5202  size_t itemsToMakeLostCount;
    5203  void* customData;
    5204  VmaAllocationRequestType type;
    5205 
    5206  VkDeviceSize CalcCost() const
    5207  {
    5208  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    5209  }
    5210 };
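// A worked example of the cost heuristic (illustration only): every allocation
// that must become lost is charged VMA_LOST_ALLOCATION_COST (1 MiB) on top of
// the lost bytes themselves, so requests that evict fewer allocations win.
#if 0
VmaAllocationRequest req = VmaAllocationRequest();
req.sumItemSize = 4096;       // 4 KiB worth of allocations would be lost.
req.itemsToMakeLostCount = 2; // Two allocations need to become lost.
const VkDeviceSize cost = req.CalcCost();
// cost == 4096 + 2 * 1048576 == 2101248 "equivalent bytes".
#endif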
    5211 
    5212 /*
    5213 Data structure used for bookkeeping of allocations and unused ranges of memory
    5214 in a single VkDeviceMemory block.
    5215 */
    5216 class VmaBlockMetadata
    5217 {
    5218 public:
    5219  VmaBlockMetadata(VmaAllocator hAllocator);
    5220  virtual ~VmaBlockMetadata() { }
    5221  virtual void Init(VkDeviceSize size) { m_Size = size; }
    5222 
    5223  // Validates all data structures inside this object. If not valid, returns false.
    5224  virtual bool Validate() const = 0;
    5225  VkDeviceSize GetSize() const { return m_Size; }
    5226  virtual size_t GetAllocationCount() const = 0;
    5227  virtual VkDeviceSize GetSumFreeSize() const = 0;
    5228  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    5229  // Returns true if this block is empty - contains only a single free suballocation.
    5230  virtual bool IsEmpty() const = 0;
    5231 
    5232  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    5233  // Shouldn't modify blockCount.
    5234  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    5235 
    5236 #if VMA_STATS_STRING_ENABLED
    5237  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    5238 #endif
    5239 
    5240  // Tries to find a place for suballocation with given parameters inside this block.
    5241  // If succeeded, fills pAllocationRequest and returns true.
    5242  // If failed, returns false.
    5243  virtual bool CreateAllocationRequest(
    5244  uint32_t currentFrameIndex,
    5245  uint32_t frameInUseCount,
    5246  VkDeviceSize bufferImageGranularity,
    5247  VkDeviceSize allocSize,
    5248  VkDeviceSize allocAlignment,
    5249  bool upperAddress,
    5250  VmaSuballocationType allocType,
    5251  bool canMakeOtherLost,
    5252  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
    5253  uint32_t strategy,
    5254  VmaAllocationRequest* pAllocationRequest) = 0;
    5255 
    5256  virtual bool MakeRequestedAllocationsLost(
    5257  uint32_t currentFrameIndex,
    5258  uint32_t frameInUseCount,
    5259  VmaAllocationRequest* pAllocationRequest) = 0;
    5260 
    5261  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    5262 
    5263  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    5264 
    5265  // Makes actual allocation based on request. Request must already be checked and valid.
    5266  virtual void Alloc(
    5267  const VmaAllocationRequest& request,
    5268  VmaSuballocationType type,
    5269  VkDeviceSize allocSize,
    5270  VmaAllocation hAllocation) = 0;
    5271 
    5272  // Frees suballocation assigned to given memory region.
    5273  virtual void Free(const VmaAllocation allocation) = 0;
    5274  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    5275 
    5276  // Tries to resize (grow or shrink) space for given allocation, in place.
    5277  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    5278 
    5279 protected:
    5280  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    5281 
    5282 #if VMA_STATS_STRING_ENABLED
    5283  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    5284  VkDeviceSize unusedBytes,
    5285  size_t allocationCount,
    5286  size_t unusedRangeCount) const;
    5287  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    5288  VkDeviceSize offset,
    5289  VmaAllocation hAllocation) const;
    5290  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    5291  VkDeviceSize offset,
    5292  VkDeviceSize size) const;
    5293  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    5294 #endif
    5295 
    5296 private:
    5297  VkDeviceSize m_Size;
    5298  const VkAllocationCallbacks* m_pAllocationCallbacks;
    5299 };
    5300 
    5301 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    5302  VMA_ASSERT(0 && "Validation failed: " #cond); \
    5303  return false; \
    5304  } } while(false)
    5305 
    5306 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    5307 {
    5308  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    5309 public:
    5310  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    5311  virtual ~VmaBlockMetadata_Generic();
    5312  virtual void Init(VkDeviceSize size);
    5313 
    5314  virtual bool Validate() const;
    5315  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    5316  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5317  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5318  virtual bool IsEmpty() const;
    5319 
    5320  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5321  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5322 
    5323 #if VMA_STATS_STRING_ENABLED
    5324  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5325 #endif
    5326 
    5327  virtual bool CreateAllocationRequest(
    5328  uint32_t currentFrameIndex,
    5329  uint32_t frameInUseCount,
    5330  VkDeviceSize bufferImageGranularity,
    5331  VkDeviceSize allocSize,
    5332  VkDeviceSize allocAlignment,
    5333  bool upperAddress,
    5334  VmaSuballocationType allocType,
    5335  bool canMakeOtherLost,
    5336  uint32_t strategy,
    5337  VmaAllocationRequest* pAllocationRequest);
    5338 
    5339  virtual bool MakeRequestedAllocationsLost(
    5340  uint32_t currentFrameIndex,
    5341  uint32_t frameInUseCount,
    5342  VmaAllocationRequest* pAllocationRequest);
    5343 
    5344  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5345 
    5346  virtual VkResult CheckCorruption(const void* pBlockData);
    5347 
    5348  virtual void Alloc(
    5349  const VmaAllocationRequest& request,
    5350  VmaSuballocationType type,
    5351  VkDeviceSize allocSize,
    5352  VmaAllocation hAllocation);
    5353 
    5354  virtual void Free(const VmaAllocation allocation);
    5355  virtual void FreeAtOffset(VkDeviceSize offset);
    5356 
    5357  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
    5358 
    5360  // For defragmentation
    5361 
    5362  bool IsBufferImageGranularityConflictPossible(
    5363  VkDeviceSize bufferImageGranularity,
    5364  VmaSuballocationType& inOutPrevSuballocType) const;
    5365 
    5366 private:
    5367  friend class VmaDefragmentationAlgorithm_Generic;
    5368  friend class VmaDefragmentationAlgorithm_Fast;
    5369 
    5370  uint32_t m_FreeCount;
    5371  VkDeviceSize m_SumFreeSize;
    5372  VmaSuballocationList m_Suballocations;
    5373  // Suballocations that are free and have size greater than a certain threshold.
    5374  // Sorted by size, ascending.
    5375  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    5376 
    5377  bool ValidateFreeSuballocationList() const;
    5378 
    5379  // Checks whether a suballocation with the given parameters can be placed at the given suballocItem.
    5380  // If yes, fills pOffset and returns true; if not, returns false.
    5381  bool CheckAllocation(
    5382  uint32_t currentFrameIndex,
    5383  uint32_t frameInUseCount,
    5384  VkDeviceSize bufferImageGranularity,
    5385  VkDeviceSize allocSize,
    5386  VkDeviceSize allocAlignment,
    5387  VmaSuballocationType allocType,
    5388  VmaSuballocationList::const_iterator suballocItem,
    5389  bool canMakeOtherLost,
    5390  VkDeviceSize* pOffset,
    5391  size_t* itemsToMakeLostCount,
    5392  VkDeviceSize* pSumFreeSize,
    5393  VkDeviceSize* pSumItemSize) const;
    5394  // Given a free suballocation, merges it with the following one, which must also be free.
    5395  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    5396  // Releases the given suballocation, making it free.
    5397  // Merges it with adjacent free suballocations if applicable.
    5398  // Returns an iterator to the new free suballocation at this place.
    5399  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    5400  // Given a free suballocation, inserts it into the sorted list
    5401  // m_FreeSuballocationsBySize if it qualifies.
    5402  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    5403  // Given a free suballocation, removes it from the sorted list
    5404  // m_FreeSuballocationsBySize if it qualifies.
    5405  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    5406 };
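// Concept sketch of the best-fit lookup enabled by keeping
// m_FreeSuballocationsBySize sorted by size, ascending (illustration only -
// the real code binary-searches iterators with a size comparator and then
// verifies alignment and buffer-image granularity via CheckAllocation):
#if 0
static size_t FindFirstFit(const VkDeviceSize* sizes, size_t count, VkDeviceSize allocSize)
{
    size_t lo = 0, hi = count;
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(sizes[mid] < allocSize)
            lo = mid + 1; // Too small - discard the lower half.
        else
            hi = mid;     // Fits - look for an even tighter fit below.
    }
    return lo; // Index of the smallest free range that can hold allocSize, or count.
}
#endif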
    5407 
    5408 /*
    5409 Allocations and their references in the internal data structure look like this:
    5410 
    5411 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5412 
    5413  0 +-------+
    5414  | |
    5415  | |
    5416  | |
    5417  +-------+
    5418  | Alloc | 1st[m_1stNullItemsBeginCount]
    5419  +-------+
    5420  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5421  +-------+
    5422  | ... |
    5423  +-------+
    5424  | Alloc | 1st[1st.size() - 1]
    5425  +-------+
    5426  | |
    5427  | |
    5428  | |
    5429 GetSize() +-------+
    5430 
    5431 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5432 
    5433  0 +-------+
    5434  | Alloc | 2nd[0]
    5435  +-------+
    5436  | Alloc | 2nd[1]
    5437  +-------+
    5438  | ... |
    5439  +-------+
    5440  | Alloc | 2nd[2nd.size() - 1]
    5441  +-------+
    5442  | |
    5443  | |
    5444  | |
    5445  +-------+
    5446  | Alloc | 1st[m_1stNullItemsBeginCount]
    5447  +-------+
    5448  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5449  +-------+
    5450  | ... |
    5451  +-------+
    5452  | Alloc | 1st[1st.size() - 1]
    5453  +-------+
    5454  | |
    5455 GetSize() +-------+
    5456 
    5457 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5458 
    5459  0 +-------+
    5460  | |
    5461  | |
    5462  | |
    5463  +-------+
    5464  | Alloc | 1st[m_1stNullItemsBeginCount]
    5465  +-------+
    5466  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5467  +-------+
    5468  | ... |
    5469  +-------+
    5470  | Alloc | 1st[1st.size() - 1]
    5471  +-------+
    5472  | |
    5473  | |
    5474  | |
    5475  +-------+
    5476  | Alloc | 2nd[2nd.size() - 1]
    5477  +-------+
    5478  | ... |
    5479  +-------+
    5480  | Alloc | 2nd[1]
    5481  +-------+
    5482  | Alloc | 2nd[0]
    5483 GetSize() +-------+
    5484 
    5485 */
    5486 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5487 {
    5488  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5489 public:
    5490  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5491  virtual ~VmaBlockMetadata_Linear();
    5492  virtual void Init(VkDeviceSize size);
    5493 
    5494  virtual bool Validate() const;
    5495  virtual size_t GetAllocationCount() const;
    5496  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5497  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5498  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5499 
    5500  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5501  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5502 
    5503 #if VMA_STATS_STRING_ENABLED
    5504  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5505 #endif
    5506 
    5507  virtual bool CreateAllocationRequest(
    5508  uint32_t currentFrameIndex,
    5509  uint32_t frameInUseCount,
    5510  VkDeviceSize bufferImageGranularity,
    5511  VkDeviceSize allocSize,
    5512  VkDeviceSize allocAlignment,
    5513  bool upperAddress,
    5514  VmaSuballocationType allocType,
    5515  bool canMakeOtherLost,
    5516  uint32_t strategy,
    5517  VmaAllocationRequest* pAllocationRequest);
    5518 
    5519  virtual bool MakeRequestedAllocationsLost(
    5520  uint32_t currentFrameIndex,
    5521  uint32_t frameInUseCount,
    5522  VmaAllocationRequest* pAllocationRequest);
    5523 
    5524  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5525 
    5526  virtual VkResult CheckCorruption(const void* pBlockData);
    5527 
    5528  virtual void Alloc(
    5529  const VmaAllocationRequest& request,
    5530  VmaSuballocationType type,
    5531  VkDeviceSize allocSize,
    5532  VmaAllocation hAllocation);
    5533 
    5534  virtual void Free(const VmaAllocation allocation);
    5535  virtual void FreeAtOffset(VkDeviceSize offset);
    5536 
    5537 private:
    5538  /*
    5539  There are two suballocation vectors, used in a ping-pong fashion.
    5540  The one with index m_1stVectorIndex is called 1st.
    5541  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5542  2nd can be non-empty only when 1st is not empty.
    5543  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5544  */
    5545  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5546 
    5547  enum SECOND_VECTOR_MODE
    5548  {
    5549  SECOND_VECTOR_EMPTY,
    5550  /*
    5551  Suballocations in the 2nd vector are created later than the ones in the 1st,
    5552  but they all have smaller offsets.
    5553  */
    5554  SECOND_VECTOR_RING_BUFFER,
    5555  /*
    5556  Suballocations in the 2nd vector form the upper side of a double stack.
    5557  They all have offsets higher than those in the 1st vector.
    5558  The top of this stack means smaller offsets, but higher indices in this vector.
    5559  */
    5560  SECOND_VECTOR_DOUBLE_STACK,
    5561  };
    5562 
    5563  VkDeviceSize m_SumFreeSize;
    5564  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5565  uint32_t m_1stVectorIndex;
    5566  SECOND_VECTOR_MODE m_2ndVectorMode;
    5567 
    5568  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5569  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5570  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5571  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5572 
    5573  // Number of items in 1st vector with hAllocation = null at the beginning.
    5574  size_t m_1stNullItemsBeginCount;
    5575  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5576  size_t m_1stNullItemsMiddleCount;
    5577  // Number of items in 2nd vector with hAllocation = null.
    5578  size_t m_2ndNullItemsCount;
    5579 
    5580  bool ShouldCompact1st() const;
    5581  void CleanupAfterFree();
    5582 
    5583  bool CreateAllocationRequest_LowerAddress(
    5584  uint32_t currentFrameIndex,
    5585  uint32_t frameInUseCount,
    5586  VkDeviceSize bufferImageGranularity,
    5587  VkDeviceSize allocSize,
    5588  VkDeviceSize allocAlignment,
    5589  VmaSuballocationType allocType,
    5590  bool canMakeOtherLost,
    5591  uint32_t strategy,
    5592  VmaAllocationRequest* pAllocationRequest);
    5593  bool CreateAllocationRequest_UpperAddress(
    5594  uint32_t currentFrameIndex,
    5595  uint32_t frameInUseCount,
    5596  VkDeviceSize bufferImageGranularity,
    5597  VkDeviceSize allocSize,
    5598  VkDeviceSize allocAlignment,
    5599  VmaSuballocationType allocType,
    5600  bool canMakeOtherLost,
    5601  uint32_t strategy,
    5602  VmaAllocationRequest* pAllocationRequest);
    5603 };
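// Illustration of the ping-pong indexing used by AccessSuballocations1st/2nd
// above (sketch only): flipping a single bit swaps which vector plays which role.
#if 0
uint32_t firstVectorIndex = 0;  // Mirrors m_1stVectorIndex.
// vectors[firstVectorIndex]     plays the role of "1st".
// vectors[firstVectorIndex ^ 1] plays the role of "2nd".
firstVectorIndex ^= 1;          // One bit-flip swaps both roles.
#endif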
    5604 
    5605 /*
    5606 - GetSize() is the original size of the allocated memory block.
    5607 - m_UsableSize is this size aligned down to a power of two.
    5608  All allocations and calculations happen relative to m_UsableSize.
    5609 - GetUnusableSize() is the difference between them.
    5610  It is reported as a separate, unused range, not available for allocations.
    5611 
    5612 The node at level 0 has size = m_UsableSize.
    5613 Each subsequent level contains nodes half the size of the previous level's.
    5614 m_LevelCount is the maximum number of levels to use in the current object.
    5615 */
    5616 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5617 {
    5618  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5619 public:
    5620  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5621  virtual ~VmaBlockMetadata_Buddy();
    5622  virtual void Init(VkDeviceSize size);
    5623 
    5624  virtual bool Validate() const;
    5625  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5626  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5627  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5628  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5629 
    5630  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5631  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5632 
    5633 #if VMA_STATS_STRING_ENABLED
    5634  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5635 #endif
    5636 
    5637  virtual bool CreateAllocationRequest(
    5638  uint32_t currentFrameIndex,
    5639  uint32_t frameInUseCount,
    5640  VkDeviceSize bufferImageGranularity,
    5641  VkDeviceSize allocSize,
    5642  VkDeviceSize allocAlignment,
    5643  bool upperAddress,
    5644  VmaSuballocationType allocType,
    5645  bool canMakeOtherLost,
    5646  uint32_t strategy,
    5647  VmaAllocationRequest* pAllocationRequest);
    5648 
    5649  virtual bool MakeRequestedAllocationsLost(
    5650  uint32_t currentFrameIndex,
    5651  uint32_t frameInUseCount,
    5652  VmaAllocationRequest* pAllocationRequest);
    5653 
    5654  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5655 
    5656  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5657 
    5658  virtual void Alloc(
    5659  const VmaAllocationRequest& request,
    5660  VmaSuballocationType type,
    5661  VkDeviceSize allocSize,
    5662  VmaAllocation hAllocation);
    5663 
    5664  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5665  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5666 
    5667 private:
    5668  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5669  static const size_t MAX_LEVELS = 30;
    5670 
    5671  struct ValidationContext
    5672  {
    5673  size_t calculatedAllocationCount;
    5674  size_t calculatedFreeCount;
    5675  VkDeviceSize calculatedSumFreeSize;
    5676 
    5677  ValidationContext() :
    5678  calculatedAllocationCount(0),
    5679  calculatedFreeCount(0),
    5680  calculatedSumFreeSize(0) { }
    5681  };
    5682 
    5683  struct Node
    5684  {
    5685  VkDeviceSize offset;
    5686  enum TYPE
    5687  {
    5688  TYPE_FREE,
    5689  TYPE_ALLOCATION,
    5690  TYPE_SPLIT,
    5691  TYPE_COUNT
    5692  } type;
    5693  Node* parent;
    5694  Node* buddy;
    5695 
    5696  union
    5697  {
    5698  struct
    5699  {
    5700  Node* prev;
    5701  Node* next;
    5702  } free;
    5703  struct
    5704  {
    5705  VmaAllocation alloc;
    5706  } allocation;
    5707  struct
    5708  {
    5709  Node* leftChild;
    5710  } split;
    5711  };
    5712  };
    5713 
    5714  // Size of the memory block aligned down to a power of two.
    5715  VkDeviceSize m_UsableSize;
    5716  uint32_t m_LevelCount;
    5717 
    5718  Node* m_Root;
    5719  struct {
    5720  Node* front;
    5721  Node* back;
    5722  } m_FreeList[MAX_LEVELS];
    5723  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5724  size_t m_AllocationCount;
    5725  // Number of nodes in the tree with type == TYPE_FREE.
    5726  size_t m_FreeCount;
    5727  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5728  VkDeviceSize m_SumFreeSize;
    5729 
    5730  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5731  void DeleteNode(Node* node);
    5732  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5733  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5734  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5735  // alloc is passed just for validation; it can be null.
    5736  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5737  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5738  // Adds node to the front of FreeList at given level.
    5739  // node->type must be FREE.
    5740  // node->free.prev, next can be undefined.
    5741  void AddToFreeListFront(uint32_t level, Node* node);
    5742  // Removes node from FreeList at given level.
    5743  // node->type must be FREE.
    5744  // node->free.prev, next stay untouched.
    5745  void RemoveFromFreeList(uint32_t level, Node* node);
    5746 
    5747 #if VMA_STATS_STRING_ENABLED
    5748  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5749 #endif
    5750 };
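// Concept sketch of the buddy level math above (illustration only, ignoring
// MIN_NODE_SIZE): the node size at level L is m_UsableSize >> L, and the level
// chosen for a request is the deepest one whose node still fits it.
#if 0
static uint32_t SketchAllocSizeToLevel(VkDeviceSize usableSize, uint32_t levelCount, VkDeviceSize allocSize)
{
    uint32_t level = 0;
    while(level + 1 < levelCount && (usableSize >> (level + 1)) >= allocSize)
        ++level;  // The half-size child node still fits - descend one level.
    return level; // Guarantees (usableSize >> level) >= allocSize.
}
// Example: usableSize = 1024, allocSize = 100 -> level 3 (node size 128).
#endif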
    5751 
    5752 /*
    5753 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5754 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5755 
    5756 Thread-safety: This class must be externally synchronized.
    5757 */
    5758 class VmaDeviceMemoryBlock
    5759 {
    5760  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5761 public:
    5762  VmaBlockMetadata* m_pMetadata;
    5763 
    5764  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5765 
    5766  ~VmaDeviceMemoryBlock()
    5767  {
    5768  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5769  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5770  }
    5771 
    5772  // Always call after construction.
    5773  void Init(
    5774  VmaAllocator hAllocator,
    5775  VmaPool hParentPool,
    5776  uint32_t newMemoryTypeIndex,
    5777  VkDeviceMemory newMemory,
    5778  VkDeviceSize newSize,
    5779  uint32_t id,
    5780  uint32_t algorithm);
    5781  // Always call before destruction.
    5782  void Destroy(VmaAllocator allocator);
    5783 
    5784  VmaPool GetParentPool() const { return m_hParentPool; }
    5785  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5786  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5787  uint32_t GetId() const { return m_Id; }
    5788  void* GetMappedData() const { return m_pMappedData; }
    5789 
    5790  // Validates all data structures inside this object. If not valid, returns false.
    5791  bool Validate() const;
    5792 
    5793  VkResult CheckCorruption(VmaAllocator hAllocator);
    5794 
    5795  // ppData can be null.
    5796  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5797  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5798 
    5799  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5800  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5801 
    5802  VkResult BindBufferMemory(
    5803  const VmaAllocator hAllocator,
    5804  const VmaAllocation hAllocation,
    5805  VkBuffer hBuffer);
    5806  VkResult BindImageMemory(
    5807  const VmaAllocator hAllocator,
    5808  const VmaAllocation hAllocation,
    5809  VkImage hImage);
    5810 
    5811 private:
    5812  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
    5813  uint32_t m_MemoryTypeIndex;
    5814  uint32_t m_Id;
    5815  VkDeviceMemory m_hMemory;
    5816 
    5817  /*
    5818  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5819  Also protects m_MapCount, m_pMappedData.
    5820  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
    5821  */
    5822  VMA_MUTEX m_Mutex;
    5823  uint32_t m_MapCount;
    5824  void* m_pMappedData;
    5825 };
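// A minimal usage sketch of the reference-counted mapping above (illustration
// only; the count parameter lets one call acquire or release several
// references at once):
#if 0
void MapSketch(VmaAllocator hAllocator, VmaDeviceMemoryBlock& block)
{
    void* pData = VMA_NULL;
    if(block.Map(hAllocator, 1, &pData) == VK_SUCCESS) // m_MapCount 0 -> 1.
    {
        // ... read or write through pData ...
        block.Unmap(hAllocator, 1);                    // m_MapCount 1 -> 0.
    }
}
#endif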
    5826 
    5827 struct VmaPointerLess
    5828 {
    5829  bool operator()(const void* lhs, const void* rhs) const
    5830  {
    5831  return lhs < rhs;
    5832  }
    5833 };
    5834 
    5835 struct VmaDefragmentationMove
    5836 {
    5837  size_t srcBlockIndex;
    5838  size_t dstBlockIndex;
    5839  VkDeviceSize srcOffset;
    5840  VkDeviceSize dstOffset;
    5841  VkDeviceSize size;
    5842 };
    5843 
    5844 class VmaDefragmentationAlgorithm;
    5845 
    5846 /*
    5847 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5848 Vulkan memory type.
    5849 
    5850 Synchronized internally with a mutex.
    5851 */
    5852 struct VmaBlockVector
    5853 {
    5854  VMA_CLASS_NO_COPY(VmaBlockVector)
    5855 public:
    5856  VmaBlockVector(
    5857  VmaAllocator hAllocator,
    5858  VmaPool hParentPool,
    5859  uint32_t memoryTypeIndex,
    5860  VkDeviceSize preferredBlockSize,
    5861  size_t minBlockCount,
    5862  size_t maxBlockCount,
    5863  VkDeviceSize bufferImageGranularity,
    5864  uint32_t frameInUseCount,
    5865  bool isCustomPool,
    5866  bool explicitBlockSize,
    5867  uint32_t algorithm);
    5868  ~VmaBlockVector();
    5869 
    5870  VkResult CreateMinBlocks();
    5871 
    5872  VmaPool GetParentPool() const { return m_hParentPool; }
    5873  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5874  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5875  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5876  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5877  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5878 
    5879  void GetPoolStats(VmaPoolStats* pStats);
    5880 
    5881  bool IsEmpty() const { return m_Blocks.empty(); }
    5882  bool IsCorruptionDetectionEnabled() const;
    5883 
    5884  VkResult Allocate(
    5885  uint32_t currentFrameIndex,
    5886  VkDeviceSize size,
    5887  VkDeviceSize alignment,
    5888  const VmaAllocationCreateInfo& createInfo,
    5889  VmaSuballocationType suballocType,
    5890  size_t allocationCount,
    5891  VmaAllocation* pAllocations);
    5892 
    5893  void Free(
    5894  VmaAllocation hAllocation);
    5895 
    5896  // Adds statistics of this BlockVector to pStats.
    5897  void AddStats(VmaStats* pStats);
    5898 
    5899 #if VMA_STATS_STRING_ENABLED
    5900  void PrintDetailedMap(class VmaJsonWriter& json);
    5901 #endif
    5902 
    5903  void MakePoolAllocationsLost(
    5904  uint32_t currentFrameIndex,
    5905  size_t* pLostAllocationCount);
    5906  VkResult CheckCorruption();
    5907 
    5908  // Saves results in pCtx->res.
    5909  void Defragment(
    5910  class VmaBlockVectorDefragmentationContext* pCtx,
    5911  VmaDefragmentationStats* pStats,
    5912  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    5913  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    5914  VkCommandBuffer commandBuffer);
    5915  void DefragmentationEnd(
    5916  class VmaBlockVectorDefragmentationContext* pCtx,
    5917  VmaDefragmentationStats* pStats);
    5918 
    5920  // To be used only while the m_Mutex is locked. Used during defragmentation.
    5921 
    5922  size_t GetBlockCount() const { return m_Blocks.size(); }
    5923  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    5924  size_t CalcAllocationCount() const;
    5925  bool IsBufferImageGranularityConflictPossible() const;
    5926 
    5927 private:
    5928  friend class VmaDefragmentationAlgorithm_Generic;
    5929 
    5930  const VmaAllocator m_hAllocator;
    5931  const VmaPool m_hParentPool;
    5932  const uint32_t m_MemoryTypeIndex;
    5933  const VkDeviceSize m_PreferredBlockSize;
    5934  const size_t m_MinBlockCount;
    5935  const size_t m_MaxBlockCount;
    5936  const VkDeviceSize m_BufferImageGranularity;
    5937  const uint32_t m_FrameInUseCount;
    5938  const bool m_IsCustomPool;
    5939  const bool m_ExplicitBlockSize;
    5940  const uint32_t m_Algorithm;
    5941  /* There can be at most one memory block that is completely empty - a
    5942  hysteresis to avoid the pessimistic case of alternating creation and
    5943  destruction of a VkDeviceMemory. */
    5944  bool m_HasEmptyBlock;
    5945  VMA_RW_MUTEX m_Mutex;
    5946  // Incrementally sorted by sumFreeSize, ascending.
    5947  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    5948  uint32_t m_NextBlockId;
    5949 
    5950  VkDeviceSize CalcMaxBlockSize() const;
    5951 
    5952  // Finds and removes given block from vector.
    5953  void Remove(VmaDeviceMemoryBlock* pBlock);
    5954 
    5955  // Performs a single step in sorting m_Blocks. They may not be fully sorted
    5956  // after this call.
    5957  void IncrementallySortBlocks();
    5958 
    5959  VkResult AllocatePage(
    5960  uint32_t currentFrameIndex,
    5961  VkDeviceSize size,
    5962  VkDeviceSize alignment,
    5963  const VmaAllocationCreateInfo& createInfo,
    5964  VmaSuballocationType suballocType,
    5965  VmaAllocation* pAllocation);
    5966 
    5967  // To be used only without CAN_MAKE_OTHER_LOST flag.
    5968  VkResult AllocateFromBlock(
    5969  VmaDeviceMemoryBlock* pBlock,
    5970  uint32_t currentFrameIndex,
    5971  VkDeviceSize size,
    5972  VkDeviceSize alignment,
    5973  VmaAllocationCreateFlags allocFlags,
    5974  void* pUserData,
    5975  VmaSuballocationType suballocType,
    5976  uint32_t strategy,
    5977  VmaAllocation* pAllocation);
    5978 
    5979  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    5980 
    5981  // Saves result to pCtx->res.
    5982  void ApplyDefragmentationMovesCpu(
    5983  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    5984  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    5985  // Saves result to pCtx->res.
    5986  void ApplyDefragmentationMovesGpu(
    5987  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    5988  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    5989  VkCommandBuffer commandBuffer);
    5990 
    5991  /*
    5992  Used during defragmentation. pDefragmentationStats is optional. It's in/out
    5993  - updated with new data.
    5994  */
    5995  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
    5996 };
    5997 
    5998 struct VmaPool_T
    5999 {
    6000  VMA_CLASS_NO_COPY(VmaPool_T)
    6001 public:
    6002  VmaBlockVector m_BlockVector;
    6003 
    6004  VmaPool_T(
    6005  VmaAllocator hAllocator,
    6006  const VmaPoolCreateInfo& createInfo,
    6007  VkDeviceSize preferredBlockSize);
    6008  ~VmaPool_T();
    6009 
    6010  uint32_t GetId() const { return m_Id; }
    6011  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    6012 
    6013 #if VMA_STATS_STRING_ENABLED
    6014  //void PrintDetailedMap(class VmaStringBuilder& sb);
    6015 #endif
    6016 
    6017 private:
    6018  uint32_t m_Id;
    6019 };
    6020 
    6021 /*
    6022 Performs defragmentation:
    6023 
    6024 - Updates `pBlockVector->m_pMetadata`.
    6025 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6026 - Does not move actual data, only returns requested moves as `moves`.
    6027 */
    6028 class VmaDefragmentationAlgorithm
    6029 {
    6030  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
    6031 public:
    6032  VmaDefragmentationAlgorithm(
    6033  VmaAllocator hAllocator,
    6034  VmaBlockVector* pBlockVector,
    6035  uint32_t currentFrameIndex) :
    6036  m_hAllocator(hAllocator),
    6037  m_pBlockVector(pBlockVector),
    6038  m_CurrentFrameIndex(currentFrameIndex)
    6039  {
    6040  }
    6041  virtual ~VmaDefragmentationAlgorithm()
    6042  {
    6043  }
    6044 
    6045  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    6046  virtual void AddAll() = 0;
    6047 
    6048  virtual VkResult Defragment(
    6049  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6050  VkDeviceSize maxBytesToMove,
    6051  uint32_t maxAllocationsToMove) = 0;
    6052 
    6053  virtual VkDeviceSize GetBytesMoved() const = 0;
    6054  virtual uint32_t GetAllocationsMoved() const = 0;
    6055 
    6056 protected:
    6057  VmaAllocator const m_hAllocator;
    6058  VmaBlockVector* const m_pBlockVector;
    6059  const uint32_t m_CurrentFrameIndex;
    6060 
    6061  struct AllocationInfo
    6062  {
    6063  VmaAllocation m_hAllocation;
    6064  VkBool32* m_pChanged;
    6065 
    6066  AllocationInfo() :
    6067  m_hAllocation(VK_NULL_HANDLE),
    6068  m_pChanged(VMA_NULL)
    6069  {
    6070  }
    6071  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
    6072  m_hAllocation(hAlloc),
    6073  m_pChanged(pChanged)
    6074  {
    6075  }
    6076  };
    6077 };
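// Illustrative sketch, hedged (not lines of vk_mem_alloc.h itself): how a
// concrete VmaDefragmentationAlgorithm is meant to be driven, per the comment
// above the class. Names `allocCallbacks`, `maxBytes` and `maxAllocs` are
// assumptions of this sketch.
//
//   VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >
//       moves(VmaStlAllocator<VmaDefragmentationMove>(allocCallbacks));
//   pAlgorithm->AddAll();                 // or AddAllocation() per allocation
//   VkResult res = pAlgorithm->Defragment(moves, maxBytes, maxAllocs);
//   // The algorithm only updates metadata and returns the requested moves;
//   // the caller performs the actual copies, then reads GetBytesMoved() and
//   // GetAllocationsMoved() for statistics.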
    6078 
    6079 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
    6080 {
    6081  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
    6082 public:
    6083  VmaDefragmentationAlgorithm_Generic(
    6084  VmaAllocator hAllocator,
    6085  VmaBlockVector* pBlockVector,
    6086  uint32_t currentFrameIndex,
    6087  bool overlappingMoveSupported);
    6088  virtual ~VmaDefragmentationAlgorithm_Generic();
    6089 
    6090  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6091  virtual void AddAll() { m_AllAllocations = true; }
    6092 
    6093  virtual VkResult Defragment(
    6094  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6095  VkDeviceSize maxBytesToMove,
    6096  uint32_t maxAllocationsToMove);
    6097 
    6098  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6099  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6100 
    6101 private:
    6102  uint32_t m_AllocationCount;
    6103  bool m_AllAllocations;
    6104 
    6105  VkDeviceSize m_BytesMoved;
    6106  uint32_t m_AllocationsMoved;
    6107 
    6108  struct AllocationInfoSizeGreater
    6109  {
    6110  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6111  {
    6112  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    6113  }
    6114  };
    6115 
    6116  struct AllocationInfoOffsetGreater
    6117  {
    6118  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6119  {
    6120  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
    6121  }
    6122  };
    6123 
    6124  struct BlockInfo
    6125  {
    6126  size_t m_OriginalBlockIndex;
    6127  VmaDeviceMemoryBlock* m_pBlock;
    6128  bool m_HasNonMovableAllocations;
    6129  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    6130 
    6131  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    6132  m_OriginalBlockIndex(SIZE_MAX),
    6133  m_pBlock(VMA_NULL),
    6134  m_HasNonMovableAllocations(true),
    6135  m_Allocations(pAllocationCallbacks)
    6136  {
    6137  }
    6138 
    6139  void CalcHasNonMovableAllocations()
    6140  {
    6141  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    6142  const size_t defragmentAllocCount = m_Allocations.size();
    6143  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    6144  }
    6145 
    6146  void SortAllocationsBySizeDescending()
    6147  {
    6148  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    6149  }
    6150 
    6151  void SortAllocationsByOffsetDescending()
    6152  {
    6153  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
    6154  }
    6155  };
    6156 
    6157  struct BlockPointerLess
    6158  {
    6159  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    6160  {
    6161  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    6162  }
    6163  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6164  {
    6165  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    6166  }
    6167  };
    6168 
    6169  // 1. Blocks with some non-movable allocations go first.
    6170  // 2. Blocks with smaller sumFreeSize go first.
    6171  struct BlockInfoCompareMoveDestination
    6172  {
    6173  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6174  {
    6175  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    6176  {
    6177  return true;
    6178  }
    6179  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    6180  {
    6181  return false;
    6182  }
    6183  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    6184  {
    6185  return true;
    6186  }
    6187  return false;
    6188  }
    6189  };
    6190 
    6191  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    6192  BlockInfoVector m_Blocks;
    6193 
    6194  VkResult DefragmentRound(
    6195  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6196  VkDeviceSize maxBytesToMove,
    6197  uint32_t maxAllocationsToMove);
    6198 
    6199  size_t CalcBlocksWithNonMovableCount() const;
    6200 
    6201  static bool MoveMakesSense(
    6202  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6203  size_t srcBlockIndex, VkDeviceSize srcOffset);
    6204 };
    6205 
    6206 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
    6207 {
    6208  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
    6209 public:
    6210  VmaDefragmentationAlgorithm_Fast(
    6211  VmaAllocator hAllocator,
    6212  VmaBlockVector* pBlockVector,
    6213  uint32_t currentFrameIndex,
    6214  bool overlappingMoveSupported);
    6215  virtual ~VmaDefragmentationAlgorithm_Fast();
    6216 
    6217  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    6218  virtual void AddAll() { m_AllAllocations = true; }
    6219 
    6220  virtual VkResult Defragment(
    6221  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6222  VkDeviceSize maxBytesToMove,
    6223  uint32_t maxAllocationsToMove);
    6224 
    6225  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6226  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6227 
    6228 private:
    6229  struct BlockInfo
    6230  {
    6231  size_t origBlockIndex;
    6232  };
    6233 
    6234  class FreeSpaceDatabase
    6235  {
    6236  public:
    6237  FreeSpaceDatabase()
    6238  {
    6239  FreeSpace s = {};
    6240  s.blockInfoIndex = SIZE_MAX;
    6241  for(size_t i = 0; i < MAX_COUNT; ++i)
    6242  {
    6243  m_FreeSpaces[i] = s;
    6244  }
    6245  }
    6246 
    6247  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    6248  {
    6249  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6250  {
    6251  return;
    6252  }
    6253 
    6254  // Find the first empty entry, or else the smallest entry smaller than the new range (to be replaced by it).
    6255  size_t bestIndex = SIZE_MAX;
    6256  for(size_t i = 0; i < MAX_COUNT; ++i)
    6257  {
    6258  // Empty structure.
    6259  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
    6260  {
    6261  bestIndex = i;
    6262  break;
    6263  }
    6264  if(m_FreeSpaces[i].size < size &&
    6265  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
    6266  {
    6267  bestIndex = i;
    6268  }
    6269  }
    6270 
    6271  if(bestIndex != SIZE_MAX)
    6272  {
    6273  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
    6274  m_FreeSpaces[bestIndex].offset = offset;
    6275  m_FreeSpaces[bestIndex].size = size;
    6276  }
    6277  }
    6278 
    6279  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
    6280  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
    6281  {
    6282  size_t bestIndex = SIZE_MAX;
    6283  VkDeviceSize bestFreeSpaceAfter = 0;
    6284  for(size_t i = 0; i < MAX_COUNT; ++i)
    6285  {
    6286  // Structure is valid.
    6287  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
    6288  {
    6289  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
    6290  // Allocation fits into this structure.
    6291  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
    6292  {
    6293  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
    6294  (dstOffset + size);
    6295  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
    6296  {
    6297  bestIndex = i;
    6298  bestFreeSpaceAfter = freeSpaceAfter;
    6299  }
    6300  }
    6301  }
    6302  }
    6303 
    6304  if(bestIndex != SIZE_MAX)
    6305  {
    6306  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
    6307  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
    6308 
    6309  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6310  {
    6311  // Leave this structure for remaining empty space.
    6312  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
    6313  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
    6314  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
    6315  }
    6316  else
    6317  {
    6318  // This structure becomes invalid.
    6319  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
    6320  }
    6321 
    6322  return true;
    6323  }
    6324 
    6325  return false;
    6326  }
    6327 
    6328  private:
    6329  static const size_t MAX_COUNT = 4;
    6330 
    6331  struct FreeSpace
    6332  {
    6333  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
    6334  VkDeviceSize offset;
    6335  VkDeviceSize size;
    6336  } m_FreeSpaces[MAX_COUNT];
    6337  };
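 // Illustrative walk-through, hedged (not lines of vk_mem_alloc.h itself), of
 // the FreeSpaceDatabase above, which caches up to MAX_COUNT free ranges:
 //
 //   FreeSpaceDatabase db;
 //   db.Register(/*blockInfoIndex=*/0, /*offset=*/256, /*size=*/1024);
 //
 //   size_t blockIndex; VkDeviceSize dstOffset;
 //   if(db.Fetch(/*alignment=*/512, /*size=*/300, blockIndex, dstOffset))
 //   {
 //       // dstOffset == 512 (256 aligned up). The entry shrinks to the tail
 //       // [812, 1280) because the 468 remaining bytes clear
 //       // VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER (16 by default);
 //       // a smaller tail would invalidate the entry instead.
 //   }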
    6338 
    6339  const bool m_OverlappingMoveSupported;
    6340 
    6341  uint32_t m_AllocationCount;
    6342  bool m_AllAllocations;
    6343 
    6344  VkDeviceSize m_BytesMoved;
    6345  uint32_t m_AllocationsMoved;
    6346 
    6347  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
    6348 
    6349  void PreprocessMetadata();
    6350  void PostprocessMetadata();
    6351  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
    6352 };
    6353 
    6354 struct VmaBlockDefragmentationContext
    6355 {
    6356  enum BLOCK_FLAG
    6357  {
    6358  BLOCK_FLAG_USED = 0x00000001,
    6359  };
    6360  uint32_t flags;
    6361  VkBuffer hBuffer;
    6362 
    6363  VmaBlockDefragmentationContext() :
    6364  flags(0),
    6365  hBuffer(VK_NULL_HANDLE)
    6366  {
    6367  }
    6368 };
    6369 
    6370 class VmaBlockVectorDefragmentationContext
    6371 {
    6372  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
    6373 public:
    6374  VkResult res;
    6375  bool mutexLocked;
    6376  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    6377 
    6378  VmaBlockVectorDefragmentationContext(
    6379  VmaAllocator hAllocator,
    6380  VmaPool hCustomPool, // Optional.
    6381  VmaBlockVector* pBlockVector,
    6382  uint32_t currFrameIndex,
    6383  uint32_t flags);
    6384  ~VmaBlockVectorDefragmentationContext();
    6385 
    6386  VmaPool GetCustomPool() const { return m_hCustomPool; }
    6387  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    6388  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
    6389 
    6390  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6391  void AddAll() { m_AllAllocations = true; }
    6392 
    6393  void Begin(bool overlappingMoveSupported);
    6394 
    6395 private:
    6396  const VmaAllocator m_hAllocator;
    6397  // Null if not from custom pool.
    6398  const VmaPool m_hCustomPool;
    6399  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    6400  VmaBlockVector* const m_pBlockVector;
    6401  const uint32_t m_CurrFrameIndex;
    6402  const uint32_t m_AlgorithmFlags;
    6403  // Owner of this object.
    6404  VmaDefragmentationAlgorithm* m_pAlgorithm;
    6405 
    6406  struct AllocInfo
    6407  {
    6408  VmaAllocation hAlloc;
    6409  VkBool32* pChanged;
    6410  };
    6411  // Used between constructor and Begin.
    6412  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    6413  bool m_AllAllocations;
    6414 };
    6415 
    6416 struct VmaDefragmentationContext_T
    6417 {
    6418 private:
    6419  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
    6420 public:
    6421  VmaDefragmentationContext_T(
    6422  VmaAllocator hAllocator,
    6423  uint32_t currFrameIndex,
    6424  uint32_t flags,
    6425  VmaDefragmentationStats* pStats);
    6426  ~VmaDefragmentationContext_T();
    6427 
    6428  void AddPools(uint32_t poolCount, VmaPool* pPools);
    6429  void AddAllocations(
    6430  uint32_t allocationCount,
    6431  VmaAllocation* pAllocations,
    6432  VkBool32* pAllocationsChanged);
    6433 
    6434  /*
    6435  Returns:
    6436  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    6437  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    6438  - Negative value if an error occurred and the object can be destroyed immediately.
    6439  */
    6440  VkResult Defragment(
    6441  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    6442  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    6443  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
    6444 
    6445 private:
    6446  const VmaAllocator m_hAllocator;
    6447  const uint32_t m_CurrFrameIndex;
    6448  const uint32_t m_Flags;
    6449  VmaDefragmentationStats* const m_pStats;
    6450  // Owner of these objects.
    6451  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    6452  // Owner of these objects.
    6453  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
    6454 };
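// Illustrative usage sketch, hedged (not lines of vk_mem_alloc.h itself): the
// public entry points that end up in VmaDefragmentationContext_T::Defragment().
// A VK_NOT_READY result means GPU moves were recorded into info.commandBuffer
// and vmaDefragmentationEnd() must wait until that command buffer has executed.
// The vector `allocs` is an assumption of this sketch.
//
//   VmaDefragmentationInfo2 info = {};
//   info.allocationCount = (uint32_t)allocs.size();
//   info.pAllocations = allocs.data();
//   info.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   info.maxCpuAllocationsToMove = UINT32_MAX;
//
//   VmaDefragmentationContext ctx;
//   VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &ctx);
//   if(res >= 0)
//   {
//       vmaDefragmentationEnd(allocator, ctx);   // after GPU work, if any
//   }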
    6455 
    6456 #if VMA_RECORDING_ENABLED
    6457 
    6458 class VmaRecorder
    6459 {
    6460 public:
    6461  VmaRecorder();
    6462  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6463  void WriteConfiguration(
    6464  const VkPhysicalDeviceProperties& devProps,
    6465  const VkPhysicalDeviceMemoryProperties& memProps,
    6466  bool dedicatedAllocationExtensionEnabled);
    6467  ~VmaRecorder();
    6468 
    6469  void RecordCreateAllocator(uint32_t frameIndex);
    6470  void RecordDestroyAllocator(uint32_t frameIndex);
    6471  void RecordCreatePool(uint32_t frameIndex,
    6472  const VmaPoolCreateInfo& createInfo,
    6473  VmaPool pool);
    6474  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6475  void RecordAllocateMemory(uint32_t frameIndex,
    6476  const VkMemoryRequirements& vkMemReq,
    6477  const VmaAllocationCreateInfo& createInfo,
    6478  VmaAllocation allocation);
    6479  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6480  const VkMemoryRequirements& vkMemReq,
    6481  const VmaAllocationCreateInfo& createInfo,
    6482  uint64_t allocationCount,
    6483  const VmaAllocation* pAllocations);
    6484  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6485  const VkMemoryRequirements& vkMemReq,
    6486  bool requiresDedicatedAllocation,
    6487  bool prefersDedicatedAllocation,
    6488  const VmaAllocationCreateInfo& createInfo,
    6489  VmaAllocation allocation);
    6490  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6491  const VkMemoryRequirements& vkMemReq,
    6492  bool requiresDedicatedAllocation,
    6493  bool prefersDedicatedAllocation,
    6494  const VmaAllocationCreateInfo& createInfo,
    6495  VmaAllocation allocation);
    6496  void RecordFreeMemory(uint32_t frameIndex,
    6497  VmaAllocation allocation);
    6498  void RecordFreeMemoryPages(uint32_t frameIndex,
    6499  uint64_t allocationCount,
    6500  const VmaAllocation* pAllocations);
    6501  void RecordResizeAllocation(
    6502  uint32_t frameIndex,
    6503  VmaAllocation allocation,
    6504  VkDeviceSize newSize);
    6505  void RecordSetAllocationUserData(uint32_t frameIndex,
    6506  VmaAllocation allocation,
    6507  const void* pUserData);
    6508  void RecordCreateLostAllocation(uint32_t frameIndex,
    6509  VmaAllocation allocation);
    6510  void RecordMapMemory(uint32_t frameIndex,
    6511  VmaAllocation allocation);
    6512  void RecordUnmapMemory(uint32_t frameIndex,
    6513  VmaAllocation allocation);
    6514  void RecordFlushAllocation(uint32_t frameIndex,
    6515  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6516  void RecordInvalidateAllocation(uint32_t frameIndex,
    6517  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6518  void RecordCreateBuffer(uint32_t frameIndex,
    6519  const VkBufferCreateInfo& bufCreateInfo,
    6520  const VmaAllocationCreateInfo& allocCreateInfo,
    6521  VmaAllocation allocation);
    6522  void RecordCreateImage(uint32_t frameIndex,
    6523  const VkImageCreateInfo& imageCreateInfo,
    6524  const VmaAllocationCreateInfo& allocCreateInfo,
    6525  VmaAllocation allocation);
    6526  void RecordDestroyBuffer(uint32_t frameIndex,
    6527  VmaAllocation allocation);
    6528  void RecordDestroyImage(uint32_t frameIndex,
    6529  VmaAllocation allocation);
    6530  void RecordTouchAllocation(uint32_t frameIndex,
    6531  VmaAllocation allocation);
    6532  void RecordGetAllocationInfo(uint32_t frameIndex,
    6533  VmaAllocation allocation);
    6534  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6535  VmaPool pool);
    6536  void RecordDefragmentationBegin(uint32_t frameIndex,
    6537  const VmaDefragmentationInfo2& info,
    6538  VmaDefragmentationContext ctx);
    6539  void RecordDefragmentationEnd(uint32_t frameIndex,
    6540  VmaDefragmentationContext ctx);
    6541 
    6542 private:
    6543  struct CallParams
    6544  {
    6545  uint32_t threadId;
    6546  double time;
    6547  };
    6548 
    6549  class UserDataString
    6550  {
    6551  public:
    6552  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6553  const char* GetString() const { return m_Str; }
    6554 
    6555  private:
    6556  char m_PtrStr[17];
    6557  const char* m_Str;
    6558  };
    6559 
    6560  bool m_UseMutex;
    6561  VmaRecordFlags m_Flags;
    6562  FILE* m_File;
    6563  VMA_MUTEX m_FileMutex;
    6564  int64_t m_Freq;
    6565  int64_t m_StartCounter;
    6566 
    6567  void GetBasicParams(CallParams& outParams);
    6568 
    6569  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6570  template<typename T>
    6571  void PrintPointerList(uint64_t count, const T* pItems)
    6572  {
    6573  if(count)
    6574  {
    6575  fprintf(m_File, "%p", pItems[0]);
    6576  for(uint64_t i = 1; i < count; ++i)
    6577  {
    6578  fprintf(m_File, " %p", pItems[i]);
    6579  }
    6580  }
    6581  }
    6582 
    6583  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6584  void Flush();
    6585 };
    6586 
    6587 #endif // #if VMA_RECORDING_ENABLED
    6588 
    6589 /*
    6590 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6591 */
    6592 class VmaAllocationObjectAllocator
    6593 {
    6594  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
    6595 public:
    6596  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
    6597 
    6598  VmaAllocation Allocate();
    6599  void Free(VmaAllocation hAlloc);
    6600 
    6601 private:
    6602  VMA_MUTEX m_Mutex;
    6603  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
    6604 };
    6605 
    6606 // Main allocator object.
    6607 struct VmaAllocator_T
    6608 {
    6609  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6610 public:
    6611  bool m_UseMutex;
    6612  bool m_UseKhrDedicatedAllocation;
    6613  VkDevice m_hDevice;
    6614  bool m_AllocationCallbacksSpecified;
    6615  VkAllocationCallbacks m_AllocationCallbacks;
    6616  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6617  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
    6618 
    6619  // Number of bytes still free out of the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
    6620  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6621  VMA_MUTEX m_HeapSizeLimitMutex;
    6622 
    6623  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6624  VkPhysicalDeviceMemoryProperties m_MemProps;
    6625 
    6626  // Default pools.
    6627  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6628 
    6629  // Each vector is sorted by memory (handle value).
    6630  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6631  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6632  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6633 
    6634  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6635  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6636  ~VmaAllocator_T();
    6637 
    6638  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6639  {
    6640  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6641  }
    6642  const VmaVulkanFunctions& GetVulkanFunctions() const
    6643  {
    6644  return m_VulkanFunctions;
    6645  }
    6646 
    6647  VkDeviceSize GetBufferImageGranularity() const
    6648  {
    6649  return VMA_MAX(
    6650  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6651  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6652  }
    6653 
    6654  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6655  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6656 
    6657  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6658  {
    6659  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6660  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6661  }
    6662  // True when the given memory type is HOST_VISIBLE but not HOST_COHERENT.
    6663  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6664  {
    6665  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6666  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6667  }
    6668  // Minimum alignment for all allocations in the given memory type.
    6669  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6670  {
    6671  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6672  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6673  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6674  }
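 // Worked example, hedged (illustrative values, not lines of the header): a
 // memory type whose propertyFlags contain HOST_VISIBLE but not HOST_COHERENT
 // masks down to exactly VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT above, so it
 // counts as non-coherent. With nonCoherentAtomSize = 64 and
 // VMA_DEBUG_ALIGNMENT = 1, GetMemoryTypeMinAlignment() returns
 // max(1, 64) = 64, so every mapped suballocation can be flushed/invalidated
 // on whole atoms.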
    6675 
    6676  bool IsIntegratedGpu() const
    6677  {
    6678  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6679  }
    6680 
    6681 #if VMA_RECORDING_ENABLED
    6682  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6683 #endif
    6684 
    6685  void GetBufferMemoryRequirements(
    6686  VkBuffer hBuffer,
    6687  VkMemoryRequirements& memReq,
    6688  bool& requiresDedicatedAllocation,
    6689  bool& prefersDedicatedAllocation) const;
    6690  void GetImageMemoryRequirements(
    6691  VkImage hImage,
    6692  VkMemoryRequirements& memReq,
    6693  bool& requiresDedicatedAllocation,
    6694  bool& prefersDedicatedAllocation) const;
    6695 
    6696  // Main allocation function.
    6697  VkResult AllocateMemory(
    6698  const VkMemoryRequirements& vkMemReq,
    6699  bool requiresDedicatedAllocation,
    6700  bool prefersDedicatedAllocation,
    6701  VkBuffer dedicatedBuffer,
    6702  VkImage dedicatedImage,
    6703  const VmaAllocationCreateInfo& createInfo,
    6704  VmaSuballocationType suballocType,
    6705  size_t allocationCount,
    6706  VmaAllocation* pAllocations);
    6707 
    6708  // Main deallocation function.
    6709  void FreeMemory(
    6710  size_t allocationCount,
    6711  const VmaAllocation* pAllocations);
    6712 
    6713  VkResult ResizeAllocation(
    6714  const VmaAllocation alloc,
    6715  VkDeviceSize newSize);
    6716 
    6717  void CalculateStats(VmaStats* pStats);
    6718 
    6719 #if VMA_STATS_STRING_ENABLED
    6720  void PrintDetailedMap(class VmaJsonWriter& json);
    6721 #endif
    6722 
    6723  VkResult DefragmentationBegin(
    6724  const VmaDefragmentationInfo2& info,
    6725  VmaDefragmentationStats* pStats,
    6726  VmaDefragmentationContext* pContext);
    6727  VkResult DefragmentationEnd(
    6728  VmaDefragmentationContext context);
    6729 
    6730  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6731  bool TouchAllocation(VmaAllocation hAllocation);
    6732 
    6733  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6734  void DestroyPool(VmaPool pool);
    6735  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6736 
    6737  void SetCurrentFrameIndex(uint32_t frameIndex);
    6738  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6739 
    6740  void MakePoolAllocationsLost(
    6741  VmaPool hPool,
    6742  size_t* pLostAllocationCount);
    6743  VkResult CheckPoolCorruption(VmaPool hPool);
    6744  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6745 
    6746  void CreateLostAllocation(VmaAllocation* pAllocation);
    6747 
    6748  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6749  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6750 
    6751  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6752  void Unmap(VmaAllocation hAllocation);
    6753 
    6754  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6755  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6756 
    6757  void FlushOrInvalidateAllocation(
    6758  VmaAllocation hAllocation,
    6759  VkDeviceSize offset, VkDeviceSize size,
    6760  VMA_CACHE_OPERATION op);
    6761 
    6762  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6763 
    6764 private:
    6765  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6766 
    6767  VkPhysicalDevice m_PhysicalDevice;
    6768  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6769 
    6770  VMA_RW_MUTEX m_PoolsMutex;
    6771  // Protected by m_PoolsMutex. Sorted by pointer value.
    6772  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6773  uint32_t m_NextPoolId;
    6774 
    6775  VmaVulkanFunctions m_VulkanFunctions;
    6776 
    6777 #if VMA_RECORDING_ENABLED
    6778  VmaRecorder* m_pRecorder;
    6779 #endif
    6780 
    6781  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6782 
    6783  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6784 
    6785  VkResult AllocateMemoryOfType(
    6786  VkDeviceSize size,
    6787  VkDeviceSize alignment,
    6788  bool dedicatedAllocation,
    6789  VkBuffer dedicatedBuffer,
    6790  VkImage dedicatedImage,
    6791  const VmaAllocationCreateInfo& createInfo,
    6792  uint32_t memTypeIndex,
    6793  VmaSuballocationType suballocType,
    6794  size_t allocationCount,
    6795  VmaAllocation* pAllocations);
    6796 
    6797  // Helper function only to be used inside AllocateDedicatedMemory.
    6798  VkResult AllocateDedicatedMemoryPage(
    6799  VkDeviceSize size,
    6800  VmaSuballocationType suballocType,
    6801  uint32_t memTypeIndex,
    6802  const VkMemoryAllocateInfo& allocInfo,
    6803  bool map,
    6804  bool isUserDataString,
    6805  void* pUserData,
    6806  VmaAllocation* pAllocation);
    6807 
    6808  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6809  VkResult AllocateDedicatedMemory(
    6810  VkDeviceSize size,
    6811  VmaSuballocationType suballocType,
    6812  uint32_t memTypeIndex,
    6813  bool map,
    6814  bool isUserDataString,
    6815  void* pUserData,
    6816  VkBuffer dedicatedBuffer,
    6817  VkImage dedicatedImage,
    6818  size_t allocationCount,
    6819  VmaAllocation* pAllocations);
    6820 
    6821  // Frees the given allocation as dedicated memory: unregisters it and destroys its VkDeviceMemory.
    6822  void FreeDedicatedMemory(VmaAllocation allocation);
    6823 };
    6824 
    6825 ////////////////////////////////////////////////////////////////////////////////
    6826 // Memory allocation #2 after VmaAllocator_T definition
    6827 
    6828 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6829 {
    6830  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6831 }
    6832 
    6833 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6834 {
    6835  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6836 }
    6837 
    6838 template<typename T>
    6839 static T* VmaAllocate(VmaAllocator hAllocator)
    6840 {
    6841  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6842 }
    6843 
    6844 template<typename T>
    6845 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6846 {
    6847  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6848 }
    6849 
    6850 template<typename T>
    6851 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6852 {
    6853  if(ptr != VMA_NULL)
    6854  {
    6855  ptr->~T();
    6856  VmaFree(hAllocator, ptr);
    6857  }
    6858 }
    6859 
    6860 template<typename T>
    6861 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6862 {
    6863  if(ptr != VMA_NULL)
    6864  {
    6865  for(size_t i = count; i--; )
    6866  ptr[i].~T();
    6867  VmaFree(hAllocator, ptr);
    6868  }
    6869 }
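// Illustrative pairing, hedged (not lines of vk_mem_alloc.h itself; MyType is
// an assumed example type): VmaAllocate<T>() only reserves raw storage, so
// construction is explicit, and vma_delete() undoes both steps.
//
//   MyType* p = VmaAllocate<MyType>(hAllocator);  // storage only
//   new(p) MyType();                              // placement-construct
//   // ... use *p ...
//   vma_delete(hAllocator, p);                    // ~MyType() + VmaFree()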
    6870 
    6871 ////////////////////////////////////////////////////////////////////////////////
    6872 // VmaStringBuilder
    6873 
    6874 #if VMA_STATS_STRING_ENABLED
    6875 
    6876 class VmaStringBuilder
    6877 {
    6878 public:
    6879  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    6880  size_t GetLength() const { return m_Data.size(); }
    6881  const char* GetData() const { return m_Data.data(); }
    6882 
    6883  void Add(char ch) { m_Data.push_back(ch); }
    6884  void Add(const char* pStr);
    6885  void AddNewLine() { Add('\n'); }
    6886  void AddNumber(uint32_t num);
    6887  void AddNumber(uint64_t num);
    6888  void AddPointer(const void* ptr);
    6889 
    6890 private:
    6891  VmaVector< char, VmaStlAllocator<char> > m_Data;
    6892 };
    6893 
    6894 void VmaStringBuilder::Add(const char* pStr)
    6895 {
    6896  const size_t strLen = strlen(pStr);
    6897  if(strLen > 0)
    6898  {
    6899  const size_t oldCount = m_Data.size();
    6900  m_Data.resize(oldCount + strLen);
    6901  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6902  }
    6903 }
    6904 
    6905 void VmaStringBuilder::AddNumber(uint32_t num)
    6906 {
    6907  char buf[11];
    6908  VmaUint32ToStr(buf, sizeof(buf), num);
    6909  Add(buf);
    6910 }
    6911 
    6912 void VmaStringBuilder::AddNumber(uint64_t num)
    6913 {
    6914  char buf[21];
    6915  VmaUint64ToStr(buf, sizeof(buf), num);
    6916  Add(buf);
    6917 }
    6918 
    6919 void VmaStringBuilder::AddPointer(const void* ptr)
    6920 {
    6921  char buf[21];
    6922  VmaPtrToStr(buf, sizeof(buf), ptr);
    6923  Add(buf);
    6924 }
    6925 
    6926 #endif // #if VMA_STATS_STRING_ENABLED
    6927 
    6928 ////////////////////////////////////////////////////////////////////////////////
    6929 // VmaJsonWriter
    6930 
    6931 #if VMA_STATS_STRING_ENABLED
    6932 
    6933 class VmaJsonWriter
    6934 {
    6935  VMA_CLASS_NO_COPY(VmaJsonWriter)
    6936 public:
    6937  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    6938  ~VmaJsonWriter();
    6939 
    6940  void BeginObject(bool singleLine = false);
    6941  void EndObject();
    6942 
    6943  void BeginArray(bool singleLine = false);
    6944  void EndArray();
    6945 
    6946  void WriteString(const char* pStr);
    6947  void BeginString(const char* pStr = VMA_NULL);
    6948  void ContinueString(const char* pStr);
    6949  void ContinueString(uint32_t n);
    6950  void ContinueString(uint64_t n);
    6951  void ContinueString_Pointer(const void* ptr);
    6952  void EndString(const char* pStr = VMA_NULL);
    6953 
    6954  void WriteNumber(uint32_t n);
    6955  void WriteNumber(uint64_t n);
    6956  void WriteBool(bool b);
    6957  void WriteNull();
    6958 
    6959 private:
    6960  static const char* const INDENT;
    6961 
    6962  enum COLLECTION_TYPE
    6963  {
    6964  COLLECTION_TYPE_OBJECT,
    6965  COLLECTION_TYPE_ARRAY,
    6966  };
    6967  struct StackItem
    6968  {
    6969  COLLECTION_TYPE type;
    6970  uint32_t valueCount;
    6971  bool singleLineMode;
    6972  };
    6973 
    6974  VmaStringBuilder& m_SB;
    6975  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    6976  bool m_InsideString;
    6977 
    6978  void BeginValue(bool isString);
    6979  void WriteIndent(bool oneLess = false);
    6980 };
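// Illustrative usage sketch, hedged (not lines of vk_mem_alloc.h itself;
// `allocCallbacks` and `sb` are assumed): inside an object, BeginValue()
// enforces string-key/value alternation, so a well-formed sequence looks like
// this and yields {"Count": 2, "Items": [1, 2]} modulo indentation:
//
//   VmaJsonWriter json(allocCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("Count");   // key - must be a string
//   json.WriteNumber(2u);        // value
//   json.WriteString("Items");
//   json.BeginArray(true);       // single-line array
//   json.WriteNumber(1u);
//   json.WriteNumber(2u);
//   json.EndArray();
//   json.EndObject();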
    6981 
    6982 const char* const VmaJsonWriter::INDENT = " ";
    6983 
    6984 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    6985  m_SB(sb),
    6986  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    6987  m_InsideString(false)
    6988 {
    6989 }
    6990 
    6991 VmaJsonWriter::~VmaJsonWriter()
    6992 {
    6993  VMA_ASSERT(!m_InsideString);
    6994  VMA_ASSERT(m_Stack.empty());
    6995 }
    6996 
    6997 void VmaJsonWriter::BeginObject(bool singleLine)
    6998 {
    6999  VMA_ASSERT(!m_InsideString);
    7000 
    7001  BeginValue(false);
    7002  m_SB.Add('{');
    7003 
    7004  StackItem item;
    7005  item.type = COLLECTION_TYPE_OBJECT;
    7006  item.valueCount = 0;
    7007  item.singleLineMode = singleLine;
    7008  m_Stack.push_back(item);
    7009 }
    7010 
    7011 void VmaJsonWriter::EndObject()
    7012 {
    7013  VMA_ASSERT(!m_InsideString);
    7014 
    7015  WriteIndent(true);
    7016  m_SB.Add('}');
    7017 
    7018  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7019  m_Stack.pop_back();
    7020 }
    7021 
    7022 void VmaJsonWriter::BeginArray(bool singleLine)
    7023 {
    7024  VMA_ASSERT(!m_InsideString);
    7025 
    7026  BeginValue(false);
    7027  m_SB.Add('[');
    7028 
    7029  StackItem item;
    7030  item.type = COLLECTION_TYPE_ARRAY;
    7031  item.valueCount = 0;
    7032  item.singleLineMode = singleLine;
    7033  m_Stack.push_back(item);
    7034 }
    7035 
    7036 void VmaJsonWriter::EndArray()
    7037 {
    7038  VMA_ASSERT(!m_InsideString);
    7039 
    7040  WriteIndent(true);
    7041  m_SB.Add(']');
    7042 
    7043  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7044  m_Stack.pop_back();
    7045 }
    7046 
    7047 void VmaJsonWriter::WriteString(const char* pStr)
    7048 {
    7049  BeginString(pStr);
    7050  EndString();
    7051 }
    7052 
    7053 void VmaJsonWriter::BeginString(const char* pStr)
    7054 {
    7055  VMA_ASSERT(!m_InsideString);
    7056 
    7057  BeginValue(true);
    7058  m_SB.Add('"');
    7059  m_InsideString = true;
    7060  if(pStr != VMA_NULL && pStr[0] != '\0')
    7061  {
    7062  ContinueString(pStr);
    7063  }
    7064 }
    7065 
    7066 void VmaJsonWriter::ContinueString(const char* pStr)
    7067 {
    7068  VMA_ASSERT(m_InsideString);
    7069 
    7070  const size_t strLen = strlen(pStr);
    7071  for(size_t i = 0; i < strLen; ++i)
    7072  {
    7073  char ch = pStr[i];
    7074  if(ch == '\\')
    7075  {
    7076  m_SB.Add("\\\\");
    7077  }
    7078  else if(ch == '"')
    7079  {
    7080  m_SB.Add("\\\"");
    7081  }
    7082  else if(ch >= 32)
    7083  {
    7084  m_SB.Add(ch);
    7085  }
    7086  else switch(ch)
    7087  {
    7088  case '\b':
    7089  m_SB.Add("\\b");
    7090  break;
    7091  case '\f':
    7092  m_SB.Add("\\f");
    7093  break;
    7094  case '\n':
    7095  m_SB.Add("\\n");
    7096  break;
    7097  case '\r':
    7098  m_SB.Add("\\r");
    7099  break;
    7100  case '\t':
    7101  m_SB.Add("\\t");
    7102  break;
    7103  default:
    7104  VMA_ASSERT(0 && "Character not currently supported.");
    7105  break;
    7106  }
    7107  }
    7108 }
    7109 
    7110 void VmaJsonWriter::ContinueString(uint32_t n)
    7111 {
    7112  VMA_ASSERT(m_InsideString);
    7113  m_SB.AddNumber(n);
    7114 }
    7115 
    7116 void VmaJsonWriter::ContinueString(uint64_t n)
    7117 {
    7118  VMA_ASSERT(m_InsideString);
    7119  m_SB.AddNumber(n);
    7120 }
    7121 
    7122 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    7123 {
    7124  VMA_ASSERT(m_InsideString);
    7125  m_SB.AddPointer(ptr);
    7126 }
    7127 
    7128 void VmaJsonWriter::EndString(const char* pStr)
    7129 {
    7130  VMA_ASSERT(m_InsideString);
    7131  if(pStr != VMA_NULL && pStr[0] != '\0')
    7132  {
    7133  ContinueString(pStr);
    7134  }
    7135  m_SB.Add('"');
    7136  m_InsideString = false;
    7137 }
    7138 
    7139 void VmaJsonWriter::WriteNumber(uint32_t n)
    7140 {
    7141  VMA_ASSERT(!m_InsideString);
    7142  BeginValue(false);
    7143  m_SB.AddNumber(n);
    7144 }
    7145 
    7146 void VmaJsonWriter::WriteNumber(uint64_t n)
    7147 {
    7148  VMA_ASSERT(!m_InsideString);
    7149  BeginValue(false);
    7150  m_SB.AddNumber(n);
    7151 }
    7152 
    7153 void VmaJsonWriter::WriteBool(bool b)
    7154 {
    7155  VMA_ASSERT(!m_InsideString);
    7156  BeginValue(false);
    7157  m_SB.Add(b ? "true" : "false");
    7158 }
    7159 
    7160 void VmaJsonWriter::WriteNull()
    7161 {
    7162  VMA_ASSERT(!m_InsideString);
    7163  BeginValue(false);
    7164  m_SB.Add("null");
    7165 }
    7166 
    7167 void VmaJsonWriter::BeginValue(bool isString)
    7168 {
    7169  if(!m_Stack.empty())
    7170  {
    7171  StackItem& currItem = m_Stack.back();
    7172  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7173  currItem.valueCount % 2 == 0)
    7174  {
    7175  VMA_ASSERT(isString);
    7176  }
    7177 
    7178  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7179  currItem.valueCount % 2 != 0)
    7180  {
    7181  m_SB.Add(": ");
    7182  }
    7183  else if(currItem.valueCount > 0)
    7184  {
    7185  m_SB.Add(", ");
    7186  WriteIndent();
    7187  }
    7188  else
    7189  {
    7190  WriteIndent();
    7191  }
    7192  ++currItem.valueCount;
    7193  }
    7194 }
    7195 
    7196 void VmaJsonWriter::WriteIndent(bool oneLess)
    7197 {
    7198  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7199  {
    7200  m_SB.AddNewLine();
    7201 
    7202  size_t count = m_Stack.size();
    7203  if(count > 0 && oneLess)
    7204  {
    7205  --count;
    7206  }
    7207  for(size_t i = 0; i < count; ++i)
    7208  {
    7209  m_SB.Add(INDENT);
    7210  }
    7211  }
    7212 }
    7213 
    7214 #endif // #if VMA_STATS_STRING_ENABLED
    7215 
    7216 ////////////////////////////////////////////////////////////////////////////////
    7217 
    7218 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7219 {
    7220  if(IsUserDataString())
    7221  {
    7222  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7223 
    7224  FreeUserDataString(hAllocator);
    7225 
    7226  if(pUserData != VMA_NULL)
    7227  {
    7228  const char* const newStrSrc = (char*)pUserData;
    7229  const size_t newStrLen = strlen(newStrSrc);
    7230  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7231  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7232  m_pUserData = newStrDst;
    7233  }
    7234  }
    7235  else
    7236  {
    7237  m_pUserData = pUserData;
    7238  }
    7239 }
    7240 
    7241 void VmaAllocation_T::ChangeBlockAllocation(
    7242  VmaAllocator hAllocator,
    7243  VmaDeviceMemoryBlock* block,
    7244  VkDeviceSize offset)
    7245 {
    7246  VMA_ASSERT(block != VMA_NULL);
    7247  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7248 
    7249  // Move mapping reference counter from old block to new block.
    7250  if(block != m_BlockAllocation.m_Block)
    7251  {
    7252  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    7253  if(IsPersistentMap())
    7254  ++mapRefCount;
    7255  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    7256  block->Map(hAllocator, mapRefCount, VMA_NULL);
    7257  }
    7258 
    7259  m_BlockAllocation.m_Block = block;
    7260  m_BlockAllocation.m_Offset = offset;
    7261 }
    7262 
    7263 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
    7264 {
    7265  VMA_ASSERT(newSize > 0);
    7266  m_Size = newSize;
    7267 }
    7268 
    7269 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
    7270 {
    7271  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7272  m_BlockAllocation.m_Offset = newOffset;
    7273 }
    7274 
    7275 VkDeviceSize VmaAllocation_T::GetOffset() const
    7276 {
    7277  switch(m_Type)
    7278  {
    7279  case ALLOCATION_TYPE_BLOCK:
    7280  return m_BlockAllocation.m_Offset;
    7281  case ALLOCATION_TYPE_DEDICATED:
    7282  return 0;
    7283  default:
    7284  VMA_ASSERT(0);
    7285  return 0;
    7286  }
    7287 }
    7288 
    7289 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7290 {
    7291  switch(m_Type)
    7292  {
    7293  case ALLOCATION_TYPE_BLOCK:
    7294  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7295  case ALLOCATION_TYPE_DEDICATED:
    7296  return m_DedicatedAllocation.m_hMemory;
    7297  default:
    7298  VMA_ASSERT(0);
    7299  return VK_NULL_HANDLE;
    7300  }
    7301 }
    7302 
    7303 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7304 {
    7305  switch(m_Type)
    7306  {
    7307  case ALLOCATION_TYPE_BLOCK:
    7308  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7309  case ALLOCATION_TYPE_DEDICATED:
    7310  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7311  default:
    7312  VMA_ASSERT(0);
    7313  return UINT32_MAX;
    7314  }
    7315 }
    7316 
    7317 void* VmaAllocation_T::GetMappedData() const
    7318 {
    7319  switch(m_Type)
    7320  {
    7321  case ALLOCATION_TYPE_BLOCK:
    7322  if(m_MapCount != 0)
    7323  {
    7324  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7325  VMA_ASSERT(pBlockData != VMA_NULL);
    7326  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7327  }
    7328  else
    7329  {
    7330  return VMA_NULL;
    7331  }
    7332  break;
    7333  case ALLOCATION_TYPE_DEDICATED:
    7334  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7335  return m_DedicatedAllocation.m_pMappedData;
    7336  default:
    7337  VMA_ASSERT(0);
    7338  return VMA_NULL;
    7339  }
    7340 }
    7341 
    7342 bool VmaAllocation_T::CanBecomeLost() const
    7343 {
    7344  switch(m_Type)
    7345  {
    7346  case ALLOCATION_TYPE_BLOCK:
    7347  return m_BlockAllocation.m_CanBecomeLost;
    7348  case ALLOCATION_TYPE_DEDICATED:
    7349  return false;
    7350  default:
    7351  VMA_ASSERT(0);
    7352  return false;
    7353  }
    7354 }
    7355 
    7356 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7357 {
    7358  VMA_ASSERT(CanBecomeLost());
    7359 
    7360  /*
    7361  Warning: This is a carefully designed algorithm.
    7362  Do not modify unless you really know what you're doing :)
    7363  */
    7364  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    7365  for(;;)
    7366  {
    7367  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7368  {
    7369  VMA_ASSERT(0);
    7370  return false;
    7371  }
    7372  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    7373  {
    7374  return false;
    7375  }
    7376  else // Last use time earlier than current time.
    7377  {
    7378  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    7379  {
    7380  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    7381  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    7382  return true;
    7383  }
    7384  }
    7385  }
    7386 }
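// Illustrative reduction, hedged (not lines of vk_mem_alloc.h itself), of the
// lock-free pattern above, with a std::atomic-style stand-in for the
// VMA_ATOMIC_UINT32 behind GetLastUseFrameIndex(). The real code additionally
// asserts that the index is not already VMA_FRAME_INDEX_LOST.
//
//   uint32_t observed = m_LastUseFrameIndex.load();
//   while(observed + frameInUseCount < currentFrameIndex)
//   {
//       if(m_LastUseFrameIndex.compare_exchange_weak(observed, VMA_FRAME_INDEX_LOST))
//       {
//           return true;   // won the race - allocation is now lost
//       }
//       // `observed` was refreshed by the failed exchange; re-check the guard.
//   }
//   return false;          // used too recently to be made lost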
    7387 
    7388 #if VMA_STATS_STRING_ENABLED
    7389 
    7390 // Names correspond to values of enum VmaSuballocationType.
    7391 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    7392  "FREE",
    7393  "UNKNOWN",
    7394  "BUFFER",
    7395  "IMAGE_UNKNOWN",
    7396  "IMAGE_LINEAR",
    7397  "IMAGE_OPTIMAL",
    7398 };
    7399 
    7400 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    7401 {
    7402  json.WriteString("Type");
    7403  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    7404 
    7405  json.WriteString("Size");
    7406  json.WriteNumber(m_Size);
    7407 
    7408  if(m_pUserData != VMA_NULL)
    7409  {
    7410  json.WriteString("UserData");
    7411  if(IsUserDataString())
    7412  {
    7413  json.WriteString((const char*)m_pUserData);
    7414  }
    7415  else
    7416  {
    7417  json.BeginString();
    7418  json.ContinueString_Pointer(m_pUserData);
    7419  json.EndString();
    7420  }
    7421  }
    7422 
    7423  json.WriteString("CreationFrameIndex");
    7424  json.WriteNumber(m_CreationFrameIndex);
    7425 
    7426  json.WriteString("LastUseFrameIndex");
    7427  json.WriteNumber(GetLastUseFrameIndex());
    7428 
    7429  if(m_BufferImageUsage != 0)
    7430  {
    7431  json.WriteString("Usage");
    7432  json.WriteNumber(m_BufferImageUsage);
    7433  }
    7434 }
    7435 
    7436 #endif
    7437 
    7438 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7439 {
    7440  VMA_ASSERT(IsUserDataString());
    7441  if(m_pUserData != VMA_NULL)
    7442  {
    7443  char* const oldStr = (char*)m_pUserData;
    7444  const size_t oldStrLen = strlen(oldStr);
    7445  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7446  m_pUserData = VMA_NULL;
    7447  }
    7448 }
    7449 
    7450 void VmaAllocation_T::BlockAllocMap()
    7451 {
    7452  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7453 
    7454  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7455  {
    7456  ++m_MapCount;
    7457  }
    7458  else
    7459  {
    7460  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7461  }
    7462 }
    7463 
    7464 void VmaAllocation_T::BlockAllocUnmap()
    7465 {
    7466  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7467 
    7468  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7469  {
    7470  --m_MapCount;
    7471  }
    7472  else
    7473  {
    7474  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7475  }
    7476 }
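// Layout note with a worked example (illustrative, not lines of the header):
// m_MapCount packs two facts into one counter. The high bit,
// MAP_COUNT_FLAG_PERSISTENT_MAP, marks allocations created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT; the low 7 bits (hence the 0x7F ceiling
// above) count nested Map()/Unmap() pairs. A persistently mapped allocation
// that the user maps twice more holds MAP_COUNT_FLAG_PERSISTENT_MAP | 2, and
// remains mapped even when that nested count returns to zero.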
    7477 
    7478 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7479 {
    7480  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7481 
    7482  if(m_MapCount != 0)
    7483  {
    7484  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7485  {
    7486  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7487  *ppData = m_DedicatedAllocation.m_pMappedData;
    7488  ++m_MapCount;
    7489  return VK_SUCCESS;
    7490  }
    7491  else
    7492  {
    7493  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7494  return VK_ERROR_MEMORY_MAP_FAILED;
    7495  }
    7496  }
    7497  else
    7498  {
    7499  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7500  hAllocator->m_hDevice,
    7501  m_DedicatedAllocation.m_hMemory,
    7502  0, // offset
    7503  VK_WHOLE_SIZE,
    7504  0, // flags
    7505  ppData);
    7506  if(result == VK_SUCCESS)
    7507  {
    7508  m_DedicatedAllocation.m_pMappedData = *ppData;
    7509  m_MapCount = 1;
    7510  }
    7511  return result;
    7512  }
    7513 }
    7514 
    7515 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7516 {
    7517  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7518 
    7519  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7520  {
    7521  --m_MapCount;
    7522  if(m_MapCount == 0)
    7523  {
    7524  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7525  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7526  hAllocator->m_hDevice,
    7527  m_DedicatedAllocation.m_hMemory);
    7528  }
    7529  }
    7530  else
    7531  {
    7532  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7533  }
    7534 }
    7535 
    7536 #if VMA_STATS_STRING_ENABLED
    7537 
    7538 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7539 {
    7540  json.BeginObject();
    7541 
    7542  json.WriteString("Blocks");
    7543  json.WriteNumber(stat.blockCount);
    7544 
    7545  json.WriteString("Allocations");
    7546  json.WriteNumber(stat.allocationCount);
    7547 
    7548  json.WriteString("UnusedRanges");
    7549  json.WriteNumber(stat.unusedRangeCount);
    7550 
    7551  json.WriteString("UsedBytes");
    7552  json.WriteNumber(stat.usedBytes);
    7553 
    7554  json.WriteString("UnusedBytes");
    7555  json.WriteNumber(stat.unusedBytes);
    7556 
    7557  if(stat.allocationCount > 1)
    7558  {
    7559  json.WriteString("AllocationSize");
    7560  json.BeginObject(true);
    7561  json.WriteString("Min");
    7562  json.WriteNumber(stat.allocationSizeMin);
    7563  json.WriteString("Avg");
    7564  json.WriteNumber(stat.allocationSizeAvg);
    7565  json.WriteString("Max");
    7566  json.WriteNumber(stat.allocationSizeMax);
    7567  json.EndObject();
    7568  }
    7569 
    7570  if(stat.unusedRangeCount > 1)
    7571  {
    7572  json.WriteString("UnusedRangeSize");
    7573  json.BeginObject(true);
    7574  json.WriteString("Min");
    7575  json.WriteNumber(stat.unusedRangeSizeMin);
    7576  json.WriteString("Avg");
    7577  json.WriteNumber(stat.unusedRangeSizeAvg);
    7578  json.WriteString("Max");
    7579  json.WriteNumber(stat.unusedRangeSizeMax);
    7580  json.EndObject();
    7581  }
    7582 
    7583  json.EndObject();
    7584 }
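// Illustrative sample of the JSON emitted above (hedged: numbers are invented
// and real output is indented by VmaJsonWriter):
//
//   {
//     "Blocks": 1,
//     "Allocations": 3,
//     "UnusedRanges": 2,
//     "UsedBytes": 196608,
//     "UnusedBytes": 65536,
//     "AllocationSize": { "Min": 16384, "Avg": 65536, "Max": 131072 },
//     "UnusedRangeSize": { "Min": 16384, "Avg": 32768, "Max": 49152 }
//   }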
    7585 
    7586 #endif // #if VMA_STATS_STRING_ENABLED
    7587 
    7588 struct VmaSuballocationItemSizeLess
    7589 {
    7590  bool operator()(
    7591  const VmaSuballocationList::iterator lhs,
    7592  const VmaSuballocationList::iterator rhs) const
    7593  {
    7594  return lhs->size < rhs->size;
    7595  }
    7596  bool operator()(
    7597  const VmaSuballocationList::iterator lhs,
    7598  VkDeviceSize rhsSize) const
    7599  {
    7600  return lhs->size < rhsSize;
    7601  }
    7602 };
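// Hedged note (not lines of vk_mem_alloc.h itself): the second operator()
// lets binary searches compare a stored iterator directly against a plain
// VkDeviceSize key, without building a dummy suballocation. A likely call
// site, sketched with the assumed name `neededSize`:
//
//   VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//       m_FreeSuballocationsBySize.data(),
//       m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
//       neededSize,
//       VmaSuballocationItemSizeLess());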
    7603 
    7604 
    7605 ////////////////////////////////////////////////////////////////////////////////
    7606 // class VmaBlockMetadata
    7607 
    7608 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    7609  m_Size(0),
    7610  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    7611 {
    7612 }
    7613 
    7614 #if VMA_STATS_STRING_ENABLED
    7615 
    7616 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7617  VkDeviceSize unusedBytes,
    7618  size_t allocationCount,
    7619  size_t unusedRangeCount) const
    7620 {
    7621  json.BeginObject();
    7622 
    7623  json.WriteString("TotalBytes");
    7624  json.WriteNumber(GetSize());
    7625 
    7626  json.WriteString("UnusedBytes");
    7627  json.WriteNumber(unusedBytes);
    7628 
    7629  json.WriteString("Allocations");
    7630  json.WriteNumber((uint64_t)allocationCount);
    7631 
    7632  json.WriteString("UnusedRanges");
    7633  json.WriteNumber((uint64_t)unusedRangeCount);
    7634 
    7635  json.WriteString("Suballocations");
    7636  json.BeginArray();
    7637 }
    7638 
    7639 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7640  VkDeviceSize offset,
    7641  VmaAllocation hAllocation) const
    7642 {
    7643  json.BeginObject(true);
    7644 
    7645  json.WriteString("Offset");
    7646  json.WriteNumber(offset);
    7647 
    7648  hAllocation->PrintParameters(json);
    7649 
    7650  json.EndObject();
    7651 }
    7652 
    7653 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7654  VkDeviceSize offset,
    7655  VkDeviceSize size) const
    7656 {
    7657  json.BeginObject(true);
    7658 
    7659  json.WriteString("Offset");
    7660  json.WriteNumber(offset);
    7661 
    7662  json.WriteString("Type");
    7663  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7664 
    7665  json.WriteString("Size");
    7666  json.WriteNumber(size);
    7667 
    7668  json.EndObject();
    7669 }
    7670 
    7671 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    7672 {
    7673  json.EndArray();
    7674  json.EndObject();
    7675 }
    7676 
    7677 #endif // #if VMA_STATS_STRING_ENABLED
    7678 
    7679 ////////////////////////////////////////////////////////////////////////////////
    7680 // class VmaBlockMetadata_Generic
    7681 
    7682 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    7683  VmaBlockMetadata(hAllocator),
    7684  m_FreeCount(0),
    7685  m_SumFreeSize(0),
    7686  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    7687  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    7688 {
    7689 }
    7690 
    7691 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    7692 {
    7693 }
    7694 
    7695 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7696 {
    7697  VmaBlockMetadata::Init(size);
    7698 
    7699  m_FreeCount = 1;
    7700  m_SumFreeSize = size;
    7701 
    7702  VmaSuballocation suballoc = {};
    7703  suballoc.offset = 0;
    7704  suballoc.size = size;
    7705  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7706  suballoc.hAllocation = VK_NULL_HANDLE;
    7707 
    7708  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7709  m_Suballocations.push_back(suballoc);
    7710  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7711  --suballocItem;
    7712  m_FreeSuballocationsBySize.push_back(suballocItem);
    7713 }
    7714 
    7715 bool VmaBlockMetadata_Generic::Validate() const
    7716 {
    7717  VMA_VALIDATE(!m_Suballocations.empty());
    7718 
    7719  // Expected offset of new suballocation as calculated from previous ones.
    7720  VkDeviceSize calculatedOffset = 0;
    7721  // Expected number of free suballocations as calculated from traversing their list.
    7722  uint32_t calculatedFreeCount = 0;
    7723  // Expected sum size of free suballocations as calculated from traversing their list.
    7724  VkDeviceSize calculatedSumFreeSize = 0;
    7725  // Expected number of free suballocations that should be registered in
    7726  // m_FreeSuballocationsBySize calculated from traversing their list.
    7727  size_t freeSuballocationsToRegister = 0;
    7728  // True if previous visited suballocation was free.
    7729  bool prevFree = false;
    7730 
    7731  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7732  suballocItem != m_Suballocations.cend();
    7733  ++suballocItem)
    7734  {
    7735  const VmaSuballocation& subAlloc = *suballocItem;
    7736 
    7737  // Actual offset of this suballocation doesn't match expected one.
    7738  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7739 
    7740  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7741  // Two adjacent free suballocations are invalid. They should be merged.
    7742  VMA_VALIDATE(!prevFree || !currFree);
    7743 
    7744  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7745 
    7746  if(currFree)
    7747  {
    7748  calculatedSumFreeSize += subAlloc.size;
    7749  ++calculatedFreeCount;
    7750  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7751  {
    7752  ++freeSuballocationsToRegister;
    7753  }
    7754 
    7755  // Margin required between allocations - every free space must be at least that large.
    7756  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7757  }
    7758  else
    7759  {
    7760  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7761  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7762 
    7763  // Margin required between allocations - previous allocation must be free.
    7764  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7765  }
    7766 
    7767  calculatedOffset += subAlloc.size;
    7768  prevFree = currFree;
    7769  }
    7770 
    7771  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7772  // match expected one.
    7773  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7774 
    7775  VkDeviceSize lastSize = 0;
    7776  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7777  {
    7778  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7779 
    7780  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7781  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7782  // They must be sorted by size ascending.
    7783  VMA_VALIDATE(suballocItem->size >= lastSize);
    7784 
    7785  lastSize = suballocItem->size;
    7786  }
    7787 
    7788  // Check if totals match calculated values.
    7789  VMA_VALIDATE(ValidateFreeSuballocationList());
    7790  VMA_VALIDATE(calculatedOffset == GetSize());
    7791  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7792  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7793 
    7794  return true;
    7795 }
    7796 
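// Validate() above recomputes every cached total from first principles:
// offsets must be contiguous, two free neighbors may never coexist (they
// should have been merged), and only free ranges of at least
// VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER may appear in the size-sorted
// registry. A condensed, self-contained sketch of the core invariant check
// (illustrative types, not the VMA API):

#include <cstdint>
#include <vector>

namespace validate_sketch {

struct Range { uint64_t offset; uint64_t size; bool free; };

inline bool ValidateRanges(const std::vector<Range>& ranges, uint64_t blockSize)
{
    uint64_t calculatedOffset = 0;
    bool prevFree = false;
    for(const Range& r : ranges)
    {
        if(r.offset != calculatedOffset) return false; // hole or overlap
        if(prevFree && r.free) return false;           // unmerged free neighbors
        calculatedOffset += r.size;
        prevFree = r.free;
    }
    return calculatedOffset == blockSize;              // ranges cover the whole block
}

} // namespace validate_sketch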
    7797 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7798 {
    7799  if(!m_FreeSuballocationsBySize.empty())
    7800  {
    7801  return m_FreeSuballocationsBySize.back()->size;
    7802  }
    7803  else
    7804  {
    7805  return 0;
    7806  }
    7807 }
    7808 
    7809 bool VmaBlockMetadata_Generic::IsEmpty() const
    7810 {
    7811  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7812 }
    7813 
    7814 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7815 {
    7816  outInfo.blockCount = 1;
    7817 
    7818  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7819  outInfo.allocationCount = rangeCount - m_FreeCount;
    7820  outInfo.unusedRangeCount = m_FreeCount;
    7821 
    7822  outInfo.unusedBytes = m_SumFreeSize;
    7823  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7824 
    7825  outInfo.allocationSizeMin = UINT64_MAX;
    7826  outInfo.allocationSizeMax = 0;
    7827  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7828  outInfo.unusedRangeSizeMax = 0;
    7829 
    7830  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7831  suballocItem != m_Suballocations.cend();
    7832  ++suballocItem)
    7833  {
    7834  const VmaSuballocation& suballoc = *suballocItem;
    7835  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7836  {
    7837  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7838  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7839  }
    7840  else
    7841  {
    7842  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7843  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7844  }
    7845  }
    7846 }
    7847 
    7848 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7849 {
    7850  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7851 
    7852  inoutStats.size += GetSize();
    7853  inoutStats.unusedSize += m_SumFreeSize;
    7854  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7855  inoutStats.unusedRangeCount += m_FreeCount;
    7856  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7857 }
    7858 
    7859 #if VMA_STATS_STRING_ENABLED
    7860 
    7861 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7862 {
    7863  PrintDetailedMap_Begin(json,
    7864  m_SumFreeSize, // unusedBytes
    7865  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7866  m_FreeCount); // unusedRangeCount
    7867 
    7868  size_t i = 0;
    7869  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7870  suballocItem != m_Suballocations.cend();
    7871  ++suballocItem, ++i)
    7872  {
    7873  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7874  {
    7875  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7876  }
    7877  else
    7878  {
    7879  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7880  }
    7881  }
    7882 
    7883  PrintDetailedMap_End(json);
    7884 }
    7885 
    7886 #endif // #if VMA_STATS_STRING_ENABLED
    7887 
    7888 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7889  uint32_t currentFrameIndex,
    7890  uint32_t frameInUseCount,
    7891  VkDeviceSize bufferImageGranularity,
    7892  VkDeviceSize allocSize,
    7893  VkDeviceSize allocAlignment,
    7894  bool upperAddress,
    7895  VmaSuballocationType allocType,
    7896  bool canMakeOtherLost,
    7897  uint32_t strategy,
    7898  VmaAllocationRequest* pAllocationRequest)
    7899 {
    7900  VMA_ASSERT(allocSize > 0);
    7901  VMA_ASSERT(!upperAddress);
    7902  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7903  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7904  VMA_HEAVY_ASSERT(Validate());
    7905 
    7906  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    7907 
    7908  // There is not enough total free space in this block to fulfill the request: Early return.
    7909  if(canMakeOtherLost == false &&
    7910  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7911  {
    7912  return false;
    7913  }
    7914 
    7915  // New algorithm, efficiently searching freeSuballocationsBySize.
    7916  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7917  if(freeSuballocCount > 0)
    7918  {
    7919  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    7920  {
    7921  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7922  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7923  m_FreeSuballocationsBySize.data(),
    7924  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7925  allocSize + 2 * VMA_DEBUG_MARGIN,
    7926  VmaSuballocationItemSizeLess());
    7927  size_t index = it - m_FreeSuballocationsBySize.data();
    7928  for(; index < freeSuballocCount; ++index)
    7929  {
    7930  if(CheckAllocation(
    7931  currentFrameIndex,
    7932  frameInUseCount,
    7933  bufferImageGranularity,
    7934  allocSize,
    7935  allocAlignment,
    7936  allocType,
    7937  m_FreeSuballocationsBySize[index],
    7938  false, // canMakeOtherLost
    7939  &pAllocationRequest->offset,
    7940  &pAllocationRequest->itemsToMakeLostCount,
    7941  &pAllocationRequest->sumFreeSize,
    7942  &pAllocationRequest->sumItemSize))
    7943  {
    7944  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7945  return true;
    7946  }
    7947  }
    7948  }
    7949  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    7950  {
    7951  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7952  it != m_Suballocations.end();
    7953  ++it)
    7954  {
    7955  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    7956  currentFrameIndex,
    7957  frameInUseCount,
    7958  bufferImageGranularity,
    7959  allocSize,
    7960  allocAlignment,
    7961  allocType,
    7962  it,
    7963  false, // canMakeOtherLost
    7964  &pAllocationRequest->offset,
    7965  &pAllocationRequest->itemsToMakeLostCount,
    7966  &pAllocationRequest->sumFreeSize,
    7967  &pAllocationRequest->sumItemSize))
    7968  {
    7969  pAllocationRequest->item = it;
    7970  return true;
    7971  }
    7972  }
    7973  }
    7974  else // WORST_FIT, FIRST_FIT
    7975  {
    7976  // Search starting from the biggest suballocations.
    7977  for(size_t index = freeSuballocCount; index--; )
    7978  {
    7979  if(CheckAllocation(
    7980  currentFrameIndex,
    7981  frameInUseCount,
    7982  bufferImageGranularity,
    7983  allocSize,
    7984  allocAlignment,
    7985  allocType,
    7986  m_FreeSuballocationsBySize[index],
    7987  false, // canMakeOtherLost
    7988  &pAllocationRequest->offset,
    7989  &pAllocationRequest->itemsToMakeLostCount,
    7990  &pAllocationRequest->sumFreeSize,
    7991  &pAllocationRequest->sumItemSize))
    7992  {
    7993  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7994  return true;
    7995  }
    7996  }
    7997  }
    7998  }
    7999 
    8000  if(canMakeOtherLost)
    8001  {
    8002  // Brute-force algorithm. TODO: Come up with something better.
    8003 
    8004  bool found = false;
    8005  VmaAllocationRequest tmpAllocRequest = {};
    8006  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8007  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8008  suballocIt != m_Suballocations.end();
    8009  ++suballocIt)
    8010  {
    8011  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8012  suballocIt->hAllocation->CanBecomeLost())
    8013  {
    8014  if(CheckAllocation(
    8015  currentFrameIndex,
    8016  frameInUseCount,
    8017  bufferImageGranularity,
    8018  allocSize,
    8019  allocAlignment,
    8020  allocType,
    8021  suballocIt,
    8022  canMakeOtherLost,
    8023  &tmpAllocRequest.offset,
    8024  &tmpAllocRequest.itemsToMakeLostCount,
    8025  &tmpAllocRequest.sumFreeSize,
    8026  &tmpAllocRequest.sumItemSize))
    8027  {
    8028  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    8029  {
    8030  *pAllocationRequest = tmpAllocRequest;
    8031  pAllocationRequest->item = suballocIt;
    8032  found = true; break;
    8033  }
    8034  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8035  {
    8036  *pAllocationRequest = tmpAllocRequest;
    8037  pAllocationRequest->item = suballocIt;
    8038  found = true;
    8039  }
    8040  }
    8041  }
    8042  }
    8043 
    8044  return found;
    8045  }
    8046 
    8047  return false;
    8048 }
    8049 
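// CreateAllocationRequest() above dispatches on strategy: best-fit
// binary-searches the size-sorted registry and walks upward, worst-fit and
// first-fit walk the registry downward from the largest range, and min-offset
// scans the suballocation list in address order. A self-contained sketch of
// the best-fit versus worst-fit choice over a size-sorted vector
// (illustrative names, not the VMA API):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

namespace strategy_sketch {

enum class Strategy { BestFit, WorstFit };

// Returns the index of the chosen free range in sizesSortedAsc, or SIZE_MAX
// when no range can hold 'needed' bytes.
inline size_t ChooseFreeRange(
    const std::vector<uint64_t>& sizesSortedAsc, uint64_t needed, Strategy strategy)
{
    if(sizesSortedAsc.empty() || sizesSortedAsc.back() < needed)
        return SIZE_MAX; // even the biggest range is too small
    if(strategy == Strategy::BestFit)
    {
        // Smallest range that still fits: lower_bound on the sorted sizes.
        return size_t(std::lower_bound(sizesSortedAsc.begin(), sizesSortedAsc.end(), needed)
            - sizesSortedAsc.begin());
    }
    return sizesSortedAsc.size() - 1; // WorstFit: take the biggest range
}

} // namespace strategy_sketch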
    8050 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    8051  uint32_t currentFrameIndex,
    8052  uint32_t frameInUseCount,
    8053  VmaAllocationRequest* pAllocationRequest)
    8054 {
    8055  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
    8056 
    8057  while(pAllocationRequest->itemsToMakeLostCount > 0)
    8058  {
    8059  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    8060  {
    8061  ++pAllocationRequest->item;
    8062  }
    8063  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8064  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    8065  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8066  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8067  {
    8068  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8069  --pAllocationRequest->itemsToMakeLostCount;
    8070  }
    8071  else
    8072  {
    8073  return false;
    8074  }
    8075  }
    8076 
    8077  VMA_HEAVY_ASSERT(Validate());
    8078  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8079  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8080 
    8081  return true;
    8082 }
    8083 
    8084 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8085 {
    8086  uint32_t lostAllocationCount = 0;
    8087  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8088  it != m_Suballocations.end();
    8089  ++it)
    8090  {
    8091  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8092  it->hAllocation->CanBecomeLost() &&
    8093  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8094  {
    8095  it = FreeSuballocation(it);
    8096  ++lostAllocationCount;
    8097  }
    8098  }
    8099  return lostAllocationCount;
    8100 }
    8101 
    8102 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8103 {
    8104  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8105  it != m_Suballocations.end();
    8106  ++it)
    8107  {
    8108  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8109  {
    8110  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8111  {
    8112  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8113  return VK_ERROR_VALIDATION_FAILED_EXT;
    8114  }
    8115  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8116  {
    8117  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8118  return VK_ERROR_VALIDATION_FAILED_EXT;
    8119  }
    8120  }
    8121  }
    8122 
    8123  return VK_SUCCESS;
    8124 }
    8125 
    8126 void VmaBlockMetadata_Generic::Alloc(
    8127  const VmaAllocationRequest& request,
    8128  VmaSuballocationType type,
    8129  VkDeviceSize allocSize,
    8130  VmaAllocation hAllocation)
    8131 {
    8132  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    8133  VMA_ASSERT(request.item != m_Suballocations.end());
    8134  VmaSuballocation& suballoc = *request.item;
    8135  // Given suballocation is a free block.
    8136  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8137  // Given offset is inside this suballocation.
    8138  VMA_ASSERT(request.offset >= suballoc.offset);
    8139  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8140  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8141  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8142 
    8143  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8144  // it to become used.
    8145  UnregisterFreeSuballocation(request.item);
    8146 
    8147  suballoc.offset = request.offset;
    8148  suballoc.size = allocSize;
    8149  suballoc.type = type;
    8150  suballoc.hAllocation = hAllocation;
    8151 
    8152  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8153  if(paddingEnd)
    8154  {
    8155  VmaSuballocation paddingSuballoc = {};
    8156  paddingSuballoc.offset = request.offset + allocSize;
    8157  paddingSuballoc.size = paddingEnd;
    8158  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8159  VmaSuballocationList::iterator next = request.item;
    8160  ++next;
    8161  const VmaSuballocationList::iterator paddingEndItem =
    8162  m_Suballocations.insert(next, paddingSuballoc);
    8163  RegisterFreeSuballocation(paddingEndItem);
    8164  }
    8165 
    8166  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8167  if(paddingBegin)
    8168  {
    8169  VmaSuballocation paddingSuballoc = {};
    8170  paddingSuballoc.offset = request.offset - paddingBegin;
    8171  paddingSuballoc.size = paddingBegin;
    8172  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8173  const VmaSuballocationList::iterator paddingBeginItem =
    8174  m_Suballocations.insert(request.item, paddingSuballoc);
    8175  RegisterFreeSuballocation(paddingBeginItem);
    8176  }
    8177 
    8178  // Update totals.
    8179  m_FreeCount = m_FreeCount - 1;
    8180  if(paddingBegin > 0)
    8181  {
    8182  ++m_FreeCount;
    8183  }
    8184  if(paddingEnd > 0)
    8185  {
    8186  ++m_FreeCount;
    8187  }
    8188  m_SumFreeSize -= allocSize;
    8189 }
    8190 
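// Alloc() above carves the chosen free range into up to three pieces,
// [paddingBegin][allocSize][paddingEnd]: paddingBegin absorbs alignment and
// debug-margin skew, and every non-empty padding becomes a new free
// suballocation, which is why m_FreeCount first drops by one and then grows
// back per non-zero padding. The split arithmetic in isolation (an
// illustrative sketch, not the VMA API):

#include <cstdint>

namespace split_sketch {

struct Split { uint64_t paddingBegin; uint64_t paddingEnd; };

// freeOffset/freeSize describe the free range; allocOffset is the aligned
// start chosen earlier; allocSize is the request. Requires
// freeOffset <= allocOffset and allocOffset + allocSize <= freeOffset + freeSize.
inline Split SplitFreeRange(
    uint64_t freeOffset, uint64_t freeSize, uint64_t allocOffset, uint64_t allocSize)
{
    const uint64_t paddingBegin = allocOffset - freeOffset;
    const uint64_t paddingEnd = freeSize - paddingBegin - allocSize;
    return { paddingBegin, paddingEnd };
}

} // namespace split_sketch

// For example, freeOffset=64, freeSize=1024, allocOffset=128, allocSize=512
// yields paddingBegin=64 and paddingEnd=448.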
    8191 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8192 {
    8193  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8194  suballocItem != m_Suballocations.end();
    8195  ++suballocItem)
    8196  {
    8197  VmaSuballocation& suballoc = *suballocItem;
    8198  if(suballoc.hAllocation == allocation)
    8199  {
    8200  FreeSuballocation(suballocItem);
    8201  VMA_HEAVY_ASSERT(Validate());
    8202  return;
    8203  }
    8204  }
    8205  VMA_ASSERT(0 && "Not found!");
    8206 }
    8207 
    8208 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8209 {
    8210  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8211  suballocItem != m_Suballocations.end();
    8212  ++suballocItem)
    8213  {
    8214  VmaSuballocation& suballoc = *suballocItem;
    8215  if(suballoc.offset == offset)
    8216  {
    8217  FreeSuballocation(suballocItem);
    8218  return;
    8219  }
    8220  }
    8221  VMA_ASSERT(0 && "Not found!");
    8222 }
    8223 
    8224 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
    8225 {
    8226  typedef VmaSuballocationList::iterator iter_type;
    8227  for(iter_type suballocItem = m_Suballocations.begin();
    8228  suballocItem != m_Suballocations.end();
    8229  ++suballocItem)
    8230  {
    8231  VmaSuballocation& suballoc = *suballocItem;
    8232  if(suballoc.hAllocation == alloc)
    8233  {
    8234  iter_type nextItem = suballocItem;
    8235  ++nextItem;
    8236 
    8237  // Should have been ensured at a higher level.
    8238  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
    8239 
    8240  // Shrinking.
    8241  if(newSize < alloc->GetSize())
    8242  {
    8243  const VkDeviceSize sizeDiff = suballoc.size - newSize;
    8244 
    8245  // There is next item.
    8246  if(nextItem != m_Suballocations.end())
    8247  {
    8248  // Next item is free.
    8249  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8250  {
    8251  // Grow this next item backward.
    8252  UnregisterFreeSuballocation(nextItem);
    8253  nextItem->offset -= sizeDiff;
    8254  nextItem->size += sizeDiff;
    8255  RegisterFreeSuballocation(nextItem);
    8256  }
    8257  // Next item is not free.
    8258  else
    8259  {
    8260  // Create free item after current one.
    8261  VmaSuballocation newFreeSuballoc;
    8262  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8263  newFreeSuballoc.offset = suballoc.offset + newSize;
    8264  newFreeSuballoc.size = sizeDiff;
    8265  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8266  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
    8267  RegisterFreeSuballocation(newFreeSuballocIt);
    8268 
    8269  ++m_FreeCount;
    8270  }
    8271  }
    8272  // This is the last item.
    8273  else
    8274  {
    8275  // Create free item at the end.
    8276  VmaSuballocation newFreeSuballoc;
    8277  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8278  newFreeSuballoc.offset = suballoc.offset + newSize;
    8279  newFreeSuballoc.size = sizeDiff;
    8280  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8281  m_Suballocations.push_back(newFreeSuballoc);
    8282 
    8283  iter_type newFreeSuballocIt = m_Suballocations.end();
    8284  RegisterFreeSuballocation(--newFreeSuballocIt);
    8285 
    8286  ++m_FreeCount;
    8287  }
    8288 
    8289  suballoc.size = newSize;
    8290  m_SumFreeSize += sizeDiff;
    8291  }
    8292  // Growing.
    8293  else
    8294  {
    8295  const VkDeviceSize sizeDiff = newSize - suballoc.size;
    8296 
    8297  // There is next item.
    8298  if(nextItem != m_Suballocations.end())
    8299  {
    8300  // Next item is free.
    8301  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8302  {
    8303  // There is not enough free space, including margin.
    8304  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
    8305  {
    8306  return false;
    8307  }
    8308 
    8309  // There is more free space than required.
    8310  if(nextItem->size > sizeDiff)
    8311  {
    8312  // Move and shrink this next item.
    8313  UnregisterFreeSuballocation(nextItem);
    8314  nextItem->offset += sizeDiff;
    8315  nextItem->size -= sizeDiff;
    8316  RegisterFreeSuballocation(nextItem);
    8317  }
    8318  // There is exactly the amount of free space required.
    8319  else
    8320  {
    8321  // Remove this next free item.
    8322  UnregisterFreeSuballocation(nextItem);
    8323  m_Suballocations.erase(nextItem);
    8324  --m_FreeCount;
    8325  }
    8326  }
    8327  // Next item is not free - there is no space to grow.
    8328  else
    8329  {
    8330  return false;
    8331  }
    8332  }
    8333  // This is the last item - there is no space to grow.
    8334  else
    8335  {
    8336  return false;
    8337  }
    8338 
    8339  suballoc.size = newSize;
    8340  m_SumFreeSize -= sizeDiff;
    8341  }
    8342 
    8343  // We cannot call Validate() here because alloc object is updated to new size outside of this call.
    8344  return true;
    8345  }
    8346  }
    8347  VMA_ASSERT(0 && "Not found!");
    8348  return false;
    8349 }
    8350 
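// ResizeAllocation() above grows or shrinks in place by trading bytes with
// the free range immediately after the allocation: shrinking donates sizeDiff
// to that neighbor (or creates one), growing consumes sizeDiff from it and
// fails when the neighbor is absent, used, or too small. A condensed sketch
// of the growing path (illustrative types, not the VMA API; a full version
// would also shift the neighbor's offset and erase it when its size reaches
// zero):

#include <cstdint>

namespace resize_sketch {

struct Range { uint64_t size; bool free; };

// Assumes newSize > cur.size. Returns false when there is no room to grow in place.
inline bool GrowInPlace(Range& cur, Range* next, uint64_t newSize, uint64_t debugMargin)
{
    const uint64_t sizeDiff = newSize - cur.size;
    if(next == nullptr || !next->free) return false;      // nothing to consume
    if(next->size < sizeDiff + debugMargin) return false; // not enough free space
    next->size -= sizeDiff;                               // shrink the free neighbor...
    cur.size = newSize;                                   // ...and widen the allocation
    return true;
}

} // namespace resize_sketch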
    8351 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8352 {
    8353  VkDeviceSize lastSize = 0;
    8354  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8355  {
    8356  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8357 
    8358  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8359  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8360  VMA_VALIDATE(it->size >= lastSize);
    8361  lastSize = it->size;
    8362  }
    8363  return true;
    8364 }
    8365 
    8366 bool VmaBlockMetadata_Generic::CheckAllocation(
    8367  uint32_t currentFrameIndex,
    8368  uint32_t frameInUseCount,
    8369  VkDeviceSize bufferImageGranularity,
    8370  VkDeviceSize allocSize,
    8371  VkDeviceSize allocAlignment,
    8372  VmaSuballocationType allocType,
    8373  VmaSuballocationList::const_iterator suballocItem,
    8374  bool canMakeOtherLost,
    8375  VkDeviceSize* pOffset,
    8376  size_t* itemsToMakeLostCount,
    8377  VkDeviceSize* pSumFreeSize,
    8378  VkDeviceSize* pSumItemSize) const
    8379 {
    8380  VMA_ASSERT(allocSize > 0);
    8381  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8382  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    8383  VMA_ASSERT(pOffset != VMA_NULL);
    8384 
    8385  *itemsToMakeLostCount = 0;
    8386  *pSumFreeSize = 0;
    8387  *pSumItemSize = 0;
    8388 
    8389  if(canMakeOtherLost)
    8390  {
    8391  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8392  {
    8393  *pSumFreeSize = suballocItem->size;
    8394  }
    8395  else
    8396  {
    8397  if(suballocItem->hAllocation->CanBecomeLost() &&
    8398  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8399  {
    8400  ++*itemsToMakeLostCount;
    8401  *pSumItemSize = suballocItem->size;
    8402  }
    8403  else
    8404  {
    8405  return false;
    8406  }
    8407  }
    8408 
    8409  // Remaining size is too small for this request: Early return.
    8410  if(GetSize() - suballocItem->offset < allocSize)
    8411  {
    8412  return false;
    8413  }
    8414 
    8415  // Start from offset equal to beginning of this suballocation.
    8416  *pOffset = suballocItem->offset;
    8417 
    8418  // Apply VMA_DEBUG_MARGIN at the beginning.
    8419  if(VMA_DEBUG_MARGIN > 0)
    8420  {
    8421  *pOffset += VMA_DEBUG_MARGIN;
    8422  }
    8423 
    8424  // Apply alignment.
    8425  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8426 
    8427  // Check previous suballocations for BufferImageGranularity conflicts.
    8428  // Make bigger alignment if necessary.
    8429  if(bufferImageGranularity > 1)
    8430  {
    8431  bool bufferImageGranularityConflict = false;
    8432  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8433  while(prevSuballocItem != m_Suballocations.cbegin())
    8434  {
    8435  --prevSuballocItem;
    8436  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8437  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8438  {
    8439  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8440  {
    8441  bufferImageGranularityConflict = true;
    8442  break;
    8443  }
    8444  }
    8445  else
    8446  // Already on previous page.
    8447  break;
    8448  }
    8449  if(bufferImageGranularityConflict)
    8450  {
    8451  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8452  }
    8453  }
    8454 
    8455  // Now that we have final *pOffset, check if we are past suballocItem.
    8456  // If yes, return false - this function should be called for another suballocItem as starting point.
    8457  if(*pOffset >= suballocItem->offset + suballocItem->size)
    8458  {
    8459  return false;
    8460  }
    8461 
    8462  // Calculate padding at the beginning based on current offset.
    8463  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    8464 
    8465  // Calculate required margin at the end.
    8466  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8467 
    8468  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    8469  // Another early return check.
    8470  if(suballocItem->offset + totalSize > GetSize())
    8471  {
    8472  return false;
    8473  }
    8474 
    8475  // Advance lastSuballocItem until desired size is reached.
    8476  // Update itemsToMakeLostCount.
    8477  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    8478  if(totalSize > suballocItem->size)
    8479  {
    8480  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    8481  while(remainingSize > 0)
    8482  {
    8483  ++lastSuballocItem;
    8484  if(lastSuballocItem == m_Suballocations.cend())
    8485  {
    8486  return false;
    8487  }
    8488  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8489  {
    8490  *pSumFreeSize += lastSuballocItem->size;
    8491  }
    8492  else
    8493  {
    8494  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    8495  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    8496  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8497  {
    8498  ++*itemsToMakeLostCount;
    8499  *pSumItemSize += lastSuballocItem->size;
    8500  }
    8501  else
    8502  {
    8503  return false;
    8504  }
    8505  }
    8506  remainingSize = (lastSuballocItem->size < remainingSize) ?
    8507  remainingSize - lastSuballocItem->size : 0;
    8508  }
    8509  }
    8510 
    8511  // Check next suballocations for BufferImageGranularity conflicts.
    8512  // If conflict exists, we must mark more allocations lost or fail.
    8513  if(bufferImageGranularity > 1)
    8514  {
    8515  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    8516  ++nextSuballocItem;
    8517  while(nextSuballocItem != m_Suballocations.cend())
    8518  {
    8519  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8520  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8521  {
    8522  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8523  {
    8524  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    8525  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    8526  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8527  {
    8528  ++*itemsToMakeLostCount;
    8529  }
    8530  else
    8531  {
    8532  return false;
    8533  }
    8534  }
    8535  }
    8536  else
    8537  {
    8538  // Already on next page.
    8539  break;
    8540  }
    8541  ++nextSuballocItem;
    8542  }
    8543  }
    8544  }
    8545  else
    8546  {
    8547  const VmaSuballocation& suballoc = *suballocItem;
    8548  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8549 
    8550  *pSumFreeSize = suballoc.size;
    8551 
    8552  // Size of this suballocation is too small for this request: Early return.
    8553  if(suballoc.size < allocSize)
    8554  {
    8555  return false;
    8556  }
    8557 
    8558  // Start from offset equal to beginning of this suballocation.
    8559  *pOffset = suballoc.offset;
    8560 
    8561  // Apply VMA_DEBUG_MARGIN at the beginning.
    8562  if(VMA_DEBUG_MARGIN > 0)
    8563  {
    8564  *pOffset += VMA_DEBUG_MARGIN;
    8565  }
    8566 
    8567  // Apply alignment.
    8568  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8569 
    8570  // Check previous suballocations for BufferImageGranularity conflicts.
    8571  // Make bigger alignment if necessary.
    8572  if(bufferImageGranularity > 1)
    8573  {
    8574  bool bufferImageGranularityConflict = false;
    8575  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8576  while(prevSuballocItem != m_Suballocations.cbegin())
    8577  {
    8578  --prevSuballocItem;
    8579  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8580  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8581  {
    8582  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8583  {
    8584  bufferImageGranularityConflict = true;
    8585  break;
    8586  }
    8587  }
    8588  else
    8589  // Already on previous page.
    8590  break;
    8591  }
    8592  if(bufferImageGranularityConflict)
    8593  {
    8594  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8595  }
    8596  }
    8597 
    8598  // Calculate padding at the beginning based on current offset.
    8599  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    8600 
    8601  // Calculate required margin at the end.
    8602  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8603 
    8604  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    8605  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    8606  {
    8607  return false;
    8608  }
    8609 
    8610  // Check next suballocations for BufferImageGranularity conflicts.
    8611  // If conflict exists, allocation cannot be made here.
    8612  if(bufferImageGranularity > 1)
    8613  {
    8614  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    8615  ++nextSuballocItem;
    8616  while(nextSuballocItem != m_Suballocations.cend())
    8617  {
    8618  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8619  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8620  {
    8621  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8622  {
    8623  return false;
    8624  }
    8625  }
    8626  else
    8627  {
    8628  // Already on next page.
    8629  break;
    8630  }
    8631  ++nextSuballocItem;
    8632  }
    8633  }
    8634  }
    8635 
    8636  // All tests passed: Success. pOffset is already filled.
    8637  return true;
    8638 }
    8639 
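// CheckAllocation() above derives the candidate offset in three steps: start
// at the free range's offset, reserve VMA_DEBUG_MARGIN, align up to the
// requested alignment, then re-align to bufferImageGranularity when a
// neighbor on the same granularity "page" holds a conflicting resource type.
// The offset arithmetic in isolation (an illustrative sketch, not the VMA
// API; page size is assumed to be a power of two, as in Vulkan):

#include <cstdint>

namespace offset_sketch {

inline uint64_t AlignUp(uint64_t v, uint64_t a) { return (v + a - 1) / a * a; }

// True when a range ending at aEnd and a range starting at bStart fall on
// the same granularity page.
inline bool OnSamePage(uint64_t aEnd, uint64_t bStart, uint64_t page)
{
    return (aEnd & ~(page - 1)) == (bStart & ~(page - 1));
}

inline uint64_t CandidateOffset(uint64_t freeOffset, uint64_t debugMargin, uint64_t alignment)
{
    return AlignUp(freeOffset + debugMargin, alignment);
}

} // namespace offset_sketch

// E.g. freeOffset=100, debugMargin=16, alignment=64 gives
// CandidateOffset = AlignUp(116, 64) = 128; the request then fits only if
// (128 - freeOffset) + allocSize + debugMargin <= freeSize.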
    8640 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8641 {
    8642  VMA_ASSERT(item != m_Suballocations.end());
    8643  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8644 
    8645  VmaSuballocationList::iterator nextItem = item;
    8646  ++nextItem;
    8647  VMA_ASSERT(nextItem != m_Suballocations.end());
    8648  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8649 
    8650  item->size += nextItem->size;
    8651  --m_FreeCount;
    8652  m_Suballocations.erase(nextItem);
    8653 }
    8654 
    8655 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8656 {
    8657  // Change this suballocation to be marked as free.
    8658  VmaSuballocation& suballoc = *suballocItem;
    8659  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8660  suballoc.hAllocation = VK_NULL_HANDLE;
    8661 
    8662  // Update totals.
    8663  ++m_FreeCount;
    8664  m_SumFreeSize += suballoc.size;
    8665 
    8666  // Merge with previous and/or next suballocation if it's also free.
    8667  bool mergeWithNext = false;
    8668  bool mergeWithPrev = false;
    8669 
    8670  VmaSuballocationList::iterator nextItem = suballocItem;
    8671  ++nextItem;
    8672  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8673  {
    8674  mergeWithNext = true;
    8675  }
    8676 
    8677  VmaSuballocationList::iterator prevItem = suballocItem;
    8678  if(suballocItem != m_Suballocations.begin())
    8679  {
    8680  --prevItem;
    8681  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8682  {
    8683  mergeWithPrev = true;
    8684  }
    8685  }
    8686 
    8687  if(mergeWithNext)
    8688  {
    8689  UnregisterFreeSuballocation(nextItem);
    8690  MergeFreeWithNext(suballocItem);
    8691  }
    8692 
    8693  if(mergeWithPrev)
    8694  {
    8695  UnregisterFreeSuballocation(prevItem);
    8696  MergeFreeWithNext(prevItem);
    8697  RegisterFreeSuballocation(prevItem);
    8698  return prevItem;
    8699  }
    8700  else
    8701  {
    8702  RegisterFreeSuballocation(suballocItem);
    8703  return suballocItem;
    8704  }
    8705 }
    8706 
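// FreeSuballocation() above restores the invariant that two adjacent free
// ranges never coexist: the next neighbor is merged into the freed item, and
// the freed item is then folded into a free previous neighbor, whose iterator
// is returned. A self-contained sketch over a std::list (illustrative, not
// the VMA API):

#include <cstdint>
#include <iterator>
#include <list>

namespace merge_sketch {

struct Range { uint64_t offset; uint64_t size; bool free; };

inline std::list<Range>::iterator FreeAndMerge(
    std::list<Range>& l, std::list<Range>::iterator it)
{
    it->free = true;
    auto next = std::next(it);
    if(next != l.end() && next->free) // absorb the next free range
    {
        it->size += next->size;
        l.erase(next);
    }
    if(it != l.begin())
    {
        auto prev = std::prev(it);
        if(prev->free) // fold into the previous free range
        {
            prev->size += it->size;
            l.erase(it);
            return prev;
        }
    }
    return it;
}

} // namespace merge_sketch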
    8707 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8708 {
    8709  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8710  VMA_ASSERT(item->size > 0);
    8711 
    8712  // You may want to enable this validation at the beginning or at the end of
    8713  // this function, depending on what you want to check.
    8714  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8715 
    8716  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8717  {
    8718  if(m_FreeSuballocationsBySize.empty())
    8719  {
    8720  m_FreeSuballocationsBySize.push_back(item);
    8721  }
    8722  else
    8723  {
    8724  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8725  }
    8726  }
    8727 
    8728  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8729 }
    8730 
    8731 
    8732 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8733 {
    8734  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8735  VMA_ASSERT(item->size > 0);
    8736 
    8737  // You may want to enable this validation at the beginning or at the end of
    8738  // this function, depending on what you want to check.
    8739  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8740 
    8741  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8742  {
    8743  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8744  m_FreeSuballocationsBySize.data(),
    8745  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8746  item,
    8747  VmaSuballocationItemSizeLess());
    8748  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8749  index < m_FreeSuballocationsBySize.size();
    8750  ++index)
    8751  {
    8752  if(m_FreeSuballocationsBySize[index] == item)
    8753  {
    8754  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8755  return;
    8756  }
    8757  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8758  }
    8759  VMA_ASSERT(0 && "Not found.");
    8760  }
    8761 
    8762  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8763 }
    8764 
    8765 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8766  VkDeviceSize bufferImageGranularity,
    8767  VmaSuballocationType& inOutPrevSuballocType) const
    8768 {
    8769  if(bufferImageGranularity == 1 || IsEmpty())
    8770  {
    8771  return false;
    8772  }
    8773 
    8774  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8775  bool typeConflictFound = false;
    8776  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8777  it != m_Suballocations.cend();
    8778  ++it)
    8779  {
    8780  const VmaSuballocationType suballocType = it->type;
    8781  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8782  {
    8783  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8784  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8785  {
    8786  typeConflictFound = true;
    8787  }
    8788  inOutPrevSuballocType = suballocType;
    8789  }
    8790  }
    8791 
    8792  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8793 }
    8794 
    8795 ////////////////////////////////////////////////////////////////////////////////
    8796 // class VmaBlockMetadata_Linear
    8797 
    8798 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    8799  VmaBlockMetadata(hAllocator),
    8800  m_SumFreeSize(0),
    8801  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8802  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8803  m_1stVectorIndex(0),
    8804  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    8805  m_1stNullItemsBeginCount(0),
    8806  m_1stNullItemsMiddleCount(0),
    8807  m_2ndNullItemsCount(0)
    8808 {
    8809 }
    8810 
    8811 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8812 {
    8813 }
    8814 
    8815 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    8816 {
    8817  VmaBlockMetadata::Init(size);
    8818  m_SumFreeSize = size;
    8819 }
    8820 
    8821 bool VmaBlockMetadata_Linear::Validate() const
    8822 {
    8823  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8824  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8825 
    8826  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8827  VMA_VALIDATE(!suballocations1st.empty() ||
    8828  suballocations2nd.empty() ||
    8829  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8830 
    8831  if(!suballocations1st.empty())
    8832  {
    8833  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
    8834  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8835  // A null item at the end should have been removed by pop_back().
    8836  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8837  }
    8838  if(!suballocations2nd.empty())
    8839  {
    8840  // A null item at the end should have been removed by pop_back().
    8841  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8842  }
    8843 
    8844  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8845  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8846 
    8847  VkDeviceSize sumUsedSize = 0;
    8848  const size_t suballoc1stCount = suballocations1st.size();
    8849  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8850 
    8851  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8852  {
    8853  const size_t suballoc2ndCount = suballocations2nd.size();
    8854  size_t nullItem2ndCount = 0;
    8855  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8856  {
    8857  const VmaSuballocation& suballoc = suballocations2nd[i];
    8858  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8859 
    8860  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8861  VMA_VALIDATE(suballoc.offset >= offset);
    8862 
    8863  if(!currFree)
    8864  {
    8865  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8866  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8867  sumUsedSize += suballoc.size;
    8868  }
    8869  else
    8870  {
    8871  ++nullItem2ndCount;
    8872  }
    8873 
    8874  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8875  }
    8876 
    8877  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8878  }
    8879 
    8880  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8881  {
    8882  const VmaSuballocation& suballoc = suballocations1st[i];
    8883  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8884  suballoc.hAllocation == VK_NULL_HANDLE);
    8885  }
    8886 
    8887  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8888 
    8889  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8890  {
    8891  const VmaSuballocation& suballoc = suballocations1st[i];
    8892  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8893 
    8894  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8895  VMA_VALIDATE(suballoc.offset >= offset);
    8896  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8897 
    8898  if(!currFree)
    8899  {
    8900  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8901  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8902  sumUsedSize += suballoc.size;
    8903  }
    8904  else
    8905  {
    8906  ++nullItem1stCount;
    8907  }
    8908 
    8909  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8910  }
    8911  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8912 
    8913  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8914  {
    8915  const size_t suballoc2ndCount = suballocations2nd.size();
    8916  size_t nullItem2ndCount = 0;
    8917  for(size_t i = suballoc2ndCount; i--; )
    8918  {
    8919  const VmaSuballocation& suballoc = suballocations2nd[i];
    8920  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8921 
    8922  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8923  VMA_VALIDATE(suballoc.offset >= offset);
    8924 
    8925  if(!currFree)
    8926  {
    8927  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8928  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8929  sumUsedSize += suballoc.size;
    8930  }
    8931  else
    8932  {
    8933  ++nullItem2ndCount;
    8934  }
    8935 
    8936  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8937  }
    8938 
    8939  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8940  }
    8941 
    8942  VMA_VALIDATE(offset <= GetSize());
    8943  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    8944 
    8945  return true;
    8946 }
    8947 
    8948 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8949 {
    8950  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8951  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8952 }
    8953 
    8954 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    8955 {
    8956  const VkDeviceSize size = GetSize();
    8957 
    8958  /*
    8959  We don't consider gaps left inside the allocation vectors by freed allocations,
    8960  because they are not suitable for reuse in the linear allocator. We consider only
    8961  space that is available for new allocations.
    8962  */
    8963  if(IsEmpty())
    8964  {
    8965  return size;
    8966  }
    8967 
    8968  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8969 
    8970  switch(m_2ndVectorMode)
    8971  {
    8972  case SECOND_VECTOR_EMPTY:
    8973  /*
    8974  Available space is after end of 1st, as well as before beginning of 1st (which
    8975  would make it a ring buffer).
    8976  */
    8977  {
    8978  const size_t suballocations1stCount = suballocations1st.size();
    8979  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8980  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8981  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8982  return VMA_MAX(
    8983  firstSuballoc.offset,
    8984  size - (lastSuballoc.offset + lastSuballoc.size));
    8985  }
    8986  break;
    8987 
    8988  case SECOND_VECTOR_RING_BUFFER:
    8989  /*
    8990  Available space is only between end of 2nd and beginning of 1st.
    8991  */
    8992  {
    8993  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8994  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    8995  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    8996  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    8997  }
    8998  break;
    8999 
    9000  case SECOND_VECTOR_DOUBLE_STACK:
    9001  /*
    9002  Available space is only between end of 1st and top of 2nd.
    9003  */
    9004  {
    9005  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9006  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    9007  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    9008  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    9009  }
    9010  break;
    9011 
    9012  default:
    9013  VMA_ASSERT(0);
    9014  return 0;
    9015  }
    9016 }
    9017 
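// The switch above encodes where a linear block can still hand out space:
// with SECOND_VECTOR_EMPTY it is before the 1st vector's head (which would
// start a ring buffer) or after its tail; with SECOND_VECTOR_RING_BUFFER only
// between the 2nd vector's tail and the 1st vector's head; with
// SECOND_VECTOR_DOUBLE_STACK only between the 1st vector's tail and the 2nd
// stack's top. The EMPTY-mode arithmetic in isolation (an illustrative
// sketch, not the VMA API):

#include <cstdint>

namespace linear_sketch {

// firstBegin/firstEnd delimit the used span of the 1st vector within a block.
inline uint64_t MaxFreeWhenSecondEmpty(uint64_t blockSize, uint64_t firstBegin, uint64_t firstEnd)
{
    const uint64_t before = firstBegin;           // room to start wrapping as a ring buffer
    const uint64_t after = blockSize - firstEnd;  // room past the last allocation
    return before > after ? before : after;
}

} // namespace linear_sketch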
    9018 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9019 {
    9020  const VkDeviceSize size = GetSize();
    9021  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9022  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9023  const size_t suballoc1stCount = suballocations1st.size();
    9024  const size_t suballoc2ndCount = suballocations2nd.size();
    9025 
    9026  outInfo.blockCount = 1;
    9027  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    9028  outInfo.unusedRangeCount = 0;
    9029  outInfo.usedBytes = 0;
    9030  outInfo.allocationSizeMin = UINT64_MAX;
    9031  outInfo.allocationSizeMax = 0;
    9032  outInfo.unusedRangeSizeMin = UINT64_MAX;
    9033  outInfo.unusedRangeSizeMax = 0;
    9034 
    9035  VkDeviceSize lastOffset = 0;
    9036 
    9037  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9038  {
    9039  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9040  size_t nextAlloc2ndIndex = 0;
    9041  while(lastOffset < freeSpace2ndTo1stEnd)
    9042  {
    9043  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9044  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9045  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9046  {
    9047  ++nextAlloc2ndIndex;
    9048  }
    9049 
    9050  // Found non-null allocation.
    9051  if(nextAlloc2ndIndex < suballoc2ndCount)
    9052  {
    9053  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9054 
    9055  // 1. Process free space before this allocation.
    9056  if(lastOffset < suballoc.offset)
    9057  {
    9058  // There is free space from lastOffset to suballoc.offset.
    9059  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9060  ++outInfo.unusedRangeCount;
    9061  outInfo.unusedBytes += unusedRangeSize;
    9062  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9063  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9064  }
    9065 
    9066  // 2. Process this allocation.
    9067  // There is allocation with suballoc.offset, suballoc.size.
    9068  outInfo.usedBytes += suballoc.size;
    9069  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9070  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9071 
    9072  // 3. Prepare for next iteration.
    9073  lastOffset = suballoc.offset + suballoc.size;
    9074  ++nextAlloc2ndIndex;
    9075  }
    9076  // We are at the end.
    9077  else
    9078  {
    9079  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9080  if(lastOffset < freeSpace2ndTo1stEnd)
    9081  {
    9082  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9083  ++outInfo.unusedRangeCount;
    9084  outInfo.unusedBytes += unusedRangeSize;
    9085  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9086  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9087  }
    9088 
    9089  // End of loop.
    9090  lastOffset = freeSpace2ndTo1stEnd;
    9091  }
    9092  }
    9093  }
    9094 
    9095  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9096  const VkDeviceSize freeSpace1stTo2ndEnd =
    9097  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9098  while(lastOffset < freeSpace1stTo2ndEnd)
    9099  {
    9100  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9101  while(nextAlloc1stIndex < suballoc1stCount &&
    9102  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9103  {
    9104  ++nextAlloc1stIndex;
    9105  }
    9106 
    9107  // Found non-null allocation.
    9108  if(nextAlloc1stIndex < suballoc1stCount)
    9109  {
    9110  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9111 
    9112  // 1. Process free space before this allocation.
    9113  if(lastOffset < suballoc.offset)
    9114  {
    9115  // There is free space from lastOffset to suballoc.offset.
    9116  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9117  ++outInfo.unusedRangeCount;
    9118  outInfo.unusedBytes += unusedRangeSize;
    9119  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9120  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9121  }
    9122 
    9123  // 2. Process this allocation.
    9124  // There is allocation with suballoc.offset, suballoc.size.
    9125  outInfo.usedBytes += suballoc.size;
    9126  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    9127  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9128 
    9129  // 3. Prepare for next iteration.
    9130  lastOffset = suballoc.offset + suballoc.size;
    9131  ++nextAlloc1stIndex;
    9132  }
    9133  // We are at the end.
    9134  else
    9135  {
    9136  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9137  if(lastOffset < freeSpace1stTo2ndEnd)
    9138  {
    9139  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9140  ++outInfo.unusedRangeCount;
    9141  outInfo.unusedBytes += unusedRangeSize;
    9142  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9143  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9144  }
    9145 
    9146  // End of loop.
    9147  lastOffset = freeSpace1stTo2ndEnd;
    9148  }
    9149  }
    9150 
    9151  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9152  {
    9153  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9154  while(lastOffset < size)
    9155  {
    9156  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9157  while(nextAlloc2ndIndex != SIZE_MAX &&
    9158  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9159  {
    9160  --nextAlloc2ndIndex;
    9161  }
    9162 
    9163  // Found non-null allocation.
    9164  if(nextAlloc2ndIndex != SIZE_MAX)
    9165  {
    9166  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9167 
    9168  // 1. Process free space before this allocation.
    9169  if(lastOffset < suballoc.offset)
    9170  {
    9171  // There is free space from lastOffset to suballoc.offset.
    9172  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9173  ++outInfo.unusedRangeCount;
    9174  outInfo.unusedBytes += unusedRangeSize;
    9175  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    9176  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9177  }
    9178 
    9179  // 2. Process this allocation.
    9180  // There is allocation with suballoc.offset, suballoc.size.
    9181  outInfo.usedBytes += suballoc.size;
    9182  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
     9183  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9184 
    9185  // 3. Prepare for next iteration.
    9186  lastOffset = suballoc.offset + suballoc.size;
    9187  --nextAlloc2ndIndex;
    9188  }
    9189  // We are at the end.
    9190  else
    9191  {
    9192  // There is free space from lastOffset to size.
    9193  if(lastOffset < size)
    9194  {
    9195  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9196  ++outInfo.unusedRangeCount;
    9197  outInfo.unusedBytes += unusedRangeSize;
    9198  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9199  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9200  }
    9201 
    9202  // End of loop.
    9203  lastOffset = size;
    9204  }
    9205  }
    9206  }
    9207 
    9208  outInfo.unusedBytes = size - outInfo.usedBytes;
    9209 }
    9210 
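/*
Note: the scanning pattern above repeats in AddPoolStats() and PrintDetailedMap()
below. It follows the two possible layouts of VmaBlockMetadata_Linear, sketched
here as reconstructed from the code itself:

    Ring buffer (SECOND_VECTOR_RING_BUFFER):
    0 |--- 2nd vector ---| free |--- 1st vector ---| free | GetSize()

    Double stack (SECOND_VECTOR_DOUBLE_STACK):
    0 |--- 1st vector -->| free |<-- 2nd vector ---| GetSize()

Statistics are accumulated in up to three passes over ascending offsets: the 2nd
vector (ring-buffer part), then the 1st vector, then the 2nd vector (double-stack
part), each time counting the free gap before every live (non-null) suballocation.
*/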
    9211 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9212 {
    9213  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9214  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9215  const VkDeviceSize size = GetSize();
    9216  const size_t suballoc1stCount = suballocations1st.size();
    9217  const size_t suballoc2ndCount = suballocations2nd.size();
    9218 
    9219  inoutStats.size += size;
    9220 
    9221  VkDeviceSize lastOffset = 0;
    9222 
    9223  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9224  {
    9225  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
     9226  size_t nextAlloc2ndIndex = 0; // 2nd vector is indexed from its own beginning; m_1stNullItemsBeginCount refers to the 1st vector.
    9227  while(lastOffset < freeSpace2ndTo1stEnd)
    9228  {
    9229  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9230  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9231  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9232  {
    9233  ++nextAlloc2ndIndex;
    9234  }
    9235 
    9236  // Found non-null allocation.
    9237  if(nextAlloc2ndIndex < suballoc2ndCount)
    9238  {
    9239  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9240 
    9241  // 1. Process free space before this allocation.
    9242  if(lastOffset < suballoc.offset)
    9243  {
    9244  // There is free space from lastOffset to suballoc.offset.
    9245  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9246  inoutStats.unusedSize += unusedRangeSize;
    9247  ++inoutStats.unusedRangeCount;
    9248  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9249  }
    9250 
    9251  // 2. Process this allocation.
    9252  // There is allocation with suballoc.offset, suballoc.size.
    9253  ++inoutStats.allocationCount;
    9254 
    9255  // 3. Prepare for next iteration.
    9256  lastOffset = suballoc.offset + suballoc.size;
    9257  ++nextAlloc2ndIndex;
    9258  }
    9259  // We are at the end.
    9260  else
    9261  {
    9262  if(lastOffset < freeSpace2ndTo1stEnd)
    9263  {
    9264  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9265  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9266  inoutStats.unusedSize += unusedRangeSize;
    9267  ++inoutStats.unusedRangeCount;
    9268  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9269  }
    9270 
    9271  // End of loop.
    9272  lastOffset = freeSpace2ndTo1stEnd;
    9273  }
    9274  }
    9275  }
    9276 
    9277  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9278  const VkDeviceSize freeSpace1stTo2ndEnd =
    9279  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9280  while(lastOffset < freeSpace1stTo2ndEnd)
    9281  {
     9282  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9283  while(nextAlloc1stIndex < suballoc1stCount &&
    9284  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9285  {
    9286  ++nextAlloc1stIndex;
    9287  }
    9288 
    9289  // Found non-null allocation.
    9290  if(nextAlloc1stIndex < suballoc1stCount)
    9291  {
    9292  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9293 
    9294  // 1. Process free space before this allocation.
    9295  if(lastOffset < suballoc.offset)
    9296  {
    9297  // There is free space from lastOffset to suballoc.offset.
    9298  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9299  inoutStats.unusedSize += unusedRangeSize;
    9300  ++inoutStats.unusedRangeCount;
    9301  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9302  }
    9303 
    9304  // 2. Process this allocation.
    9305  // There is allocation with suballoc.offset, suballoc.size.
    9306  ++inoutStats.allocationCount;
    9307 
    9308  // 3. Prepare for next iteration.
    9309  lastOffset = suballoc.offset + suballoc.size;
    9310  ++nextAlloc1stIndex;
    9311  }
    9312  // We are at the end.
    9313  else
    9314  {
    9315  if(lastOffset < freeSpace1stTo2ndEnd)
    9316  {
    9317  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9318  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9319  inoutStats.unusedSize += unusedRangeSize;
    9320  ++inoutStats.unusedRangeCount;
    9321  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9322  }
    9323 
    9324  // End of loop.
    9325  lastOffset = freeSpace1stTo2ndEnd;
    9326  }
    9327  }
    9328 
    9329  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9330  {
    9331  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9332  while(lastOffset < size)
    9333  {
    9334  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9335  while(nextAlloc2ndIndex != SIZE_MAX &&
    9336  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9337  {
    9338  --nextAlloc2ndIndex;
    9339  }
    9340 
    9341  // Found non-null allocation.
    9342  if(nextAlloc2ndIndex != SIZE_MAX)
    9343  {
    9344  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9345 
    9346  // 1. Process free space before this allocation.
    9347  if(lastOffset < suballoc.offset)
    9348  {
    9349  // There is free space from lastOffset to suballoc.offset.
    9350  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9351  inoutStats.unusedSize += unusedRangeSize;
    9352  ++inoutStats.unusedRangeCount;
    9353  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9354  }
    9355 
    9356  // 2. Process this allocation.
    9357  // There is allocation with suballoc.offset, suballoc.size.
    9358  ++inoutStats.allocationCount;
    9359 
    9360  // 3. Prepare for next iteration.
    9361  lastOffset = suballoc.offset + suballoc.size;
    9362  --nextAlloc2ndIndex;
    9363  }
    9364  // We are at the end.
    9365  else
    9366  {
    9367  if(lastOffset < size)
    9368  {
    9369  // There is free space from lastOffset to size.
    9370  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9371  inoutStats.unusedSize += unusedRangeSize;
    9372  ++inoutStats.unusedRangeCount;
    9373  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9374  }
    9375 
    9376  // End of loop.
    9377  lastOffset = size;
    9378  }
    9379  }
    9380  }
    9381 }
    9382 
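/*
Note: AddPoolStats() feeds the public per-pool statistics. A minimal usage
sketch, assuming an existing VmaAllocator `allocator` and custom VmaPool `pool`:

    VmaPoolStats poolStats = {};
    vmaGetPoolStats(allocator, pool, &poolStats);
    // poolStats.size - poolStats.unusedSize == bytes currently allocated.
    // poolStats.unusedRangeSizeMax == size of the largest free range, i.e. the
    // biggest allocation that could still succeed without adding a new block.
*/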
    9383 #if VMA_STATS_STRING_ENABLED
    9384 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9385 {
    9386  const VkDeviceSize size = GetSize();
    9387  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9388  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9389  const size_t suballoc1stCount = suballocations1st.size();
    9390  const size_t suballoc2ndCount = suballocations2nd.size();
    9391 
    9392  // FIRST PASS
    9393 
    9394  size_t unusedRangeCount = 0;
    9395  VkDeviceSize usedBytes = 0;
    9396 
    9397  VkDeviceSize lastOffset = 0;
    9398 
    9399  size_t alloc2ndCount = 0;
    9400  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9401  {
    9402  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9403  size_t nextAlloc2ndIndex = 0;
    9404  while(lastOffset < freeSpace2ndTo1stEnd)
    9405  {
    9406  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9407  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9408  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9409  {
    9410  ++nextAlloc2ndIndex;
    9411  }
    9412 
    9413  // Found non-null allocation.
    9414  if(nextAlloc2ndIndex < suballoc2ndCount)
    9415  {
    9416  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9417 
    9418  // 1. Process free space before this allocation.
    9419  if(lastOffset < suballoc.offset)
    9420  {
    9421  // There is free space from lastOffset to suballoc.offset.
    9422  ++unusedRangeCount;
    9423  }
    9424 
    9425  // 2. Process this allocation.
    9426  // There is allocation with suballoc.offset, suballoc.size.
    9427  ++alloc2ndCount;
    9428  usedBytes += suballoc.size;
    9429 
    9430  // 3. Prepare for next iteration.
    9431  lastOffset = suballoc.offset + suballoc.size;
    9432  ++nextAlloc2ndIndex;
    9433  }
    9434  // We are at the end.
    9435  else
    9436  {
    9437  if(lastOffset < freeSpace2ndTo1stEnd)
    9438  {
    9439  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9440  ++unusedRangeCount;
    9441  }
    9442 
    9443  // End of loop.
    9444  lastOffset = freeSpace2ndTo1stEnd;
    9445  }
    9446  }
    9447  }
    9448 
    9449  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9450  size_t alloc1stCount = 0;
    9451  const VkDeviceSize freeSpace1stTo2ndEnd =
    9452  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9453  while(lastOffset < freeSpace1stTo2ndEnd)
    9454  {
     9455  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9456  while(nextAlloc1stIndex < suballoc1stCount &&
    9457  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9458  {
    9459  ++nextAlloc1stIndex;
    9460  }
    9461 
    9462  // Found non-null allocation.
    9463  if(nextAlloc1stIndex < suballoc1stCount)
    9464  {
    9465  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9466 
    9467  // 1. Process free space before this allocation.
    9468  if(lastOffset < suballoc.offset)
    9469  {
    9470  // There is free space from lastOffset to suballoc.offset.
    9471  ++unusedRangeCount;
    9472  }
    9473 
    9474  // 2. Process this allocation.
    9475  // There is allocation with suballoc.offset, suballoc.size.
    9476  ++alloc1stCount;
    9477  usedBytes += suballoc.size;
    9478 
    9479  // 3. Prepare for next iteration.
    9480  lastOffset = suballoc.offset + suballoc.size;
    9481  ++nextAlloc1stIndex;
    9482  }
    9483  // We are at the end.
    9484  else
    9485  {
     9486  if(lastOffset < freeSpace1stTo2ndEnd)
    9487  {
    9488  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9489  ++unusedRangeCount;
    9490  }
    9491 
    9492  // End of loop.
    9493  lastOffset = freeSpace1stTo2ndEnd;
    9494  }
    9495  }
    9496 
    9497  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9498  {
    9499  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9500  while(lastOffset < size)
    9501  {
    9502  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9503  while(nextAlloc2ndIndex != SIZE_MAX &&
    9504  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9505  {
    9506  --nextAlloc2ndIndex;
    9507  }
    9508 
    9509  // Found non-null allocation.
    9510  if(nextAlloc2ndIndex != SIZE_MAX)
    9511  {
    9512  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9513 
    9514  // 1. Process free space before this allocation.
    9515  if(lastOffset < suballoc.offset)
    9516  {
    9517  // There is free space from lastOffset to suballoc.offset.
    9518  ++unusedRangeCount;
    9519  }
    9520 
    9521  // 2. Process this allocation.
    9522  // There is allocation with suballoc.offset, suballoc.size.
    9523  ++alloc2ndCount;
    9524  usedBytes += suballoc.size;
    9525 
    9526  // 3. Prepare for next iteration.
    9527  lastOffset = suballoc.offset + suballoc.size;
    9528  --nextAlloc2ndIndex;
    9529  }
    9530  // We are at the end.
    9531  else
    9532  {
    9533  if(lastOffset < size)
    9534  {
    9535  // There is free space from lastOffset to size.
    9536  ++unusedRangeCount;
    9537  }
    9538 
    9539  // End of loop.
    9540  lastOffset = size;
    9541  }
    9542  }
    9543  }
    9544 
    9545  const VkDeviceSize unusedBytes = size - usedBytes;
    9546  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9547 
    9548  // SECOND PASS
    9549  lastOffset = 0;
    9550 
    9551  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9552  {
    9553  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9554  size_t nextAlloc2ndIndex = 0;
    9555  while(lastOffset < freeSpace2ndTo1stEnd)
    9556  {
    9557  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9558  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9559  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9560  {
    9561  ++nextAlloc2ndIndex;
    9562  }
    9563 
    9564  // Found non-null allocation.
    9565  if(nextAlloc2ndIndex < suballoc2ndCount)
    9566  {
    9567  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9568 
    9569  // 1. Process free space before this allocation.
    9570  if(lastOffset < suballoc.offset)
    9571  {
    9572  // There is free space from lastOffset to suballoc.offset.
    9573  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9574  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9575  }
    9576 
    9577  // 2. Process this allocation.
    9578  // There is allocation with suballoc.offset, suballoc.size.
    9579  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9580 
    9581  // 3. Prepare for next iteration.
    9582  lastOffset = suballoc.offset + suballoc.size;
    9583  ++nextAlloc2ndIndex;
    9584  }
    9585  // We are at the end.
    9586  else
    9587  {
    9588  if(lastOffset < freeSpace2ndTo1stEnd)
    9589  {
    9590  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9591  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9592  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9593  }
    9594 
    9595  // End of loop.
    9596  lastOffset = freeSpace2ndTo1stEnd;
    9597  }
    9598  }
    9599  }
    9600 
    9601  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9602  while(lastOffset < freeSpace1stTo2ndEnd)
    9603  {
     9604  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9605  while(nextAlloc1stIndex < suballoc1stCount &&
    9606  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9607  {
    9608  ++nextAlloc1stIndex;
    9609  }
    9610 
    9611  // Found non-null allocation.
    9612  if(nextAlloc1stIndex < suballoc1stCount)
    9613  {
    9614  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9615 
    9616  // 1. Process free space before this allocation.
    9617  if(lastOffset < suballoc.offset)
    9618  {
    9619  // There is free space from lastOffset to suballoc.offset.
    9620  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9621  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9622  }
    9623 
    9624  // 2. Process this allocation.
    9625  // There is allocation with suballoc.offset, suballoc.size.
    9626  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9627 
    9628  // 3. Prepare for next iteration.
    9629  lastOffset = suballoc.offset + suballoc.size;
    9630  ++nextAlloc1stIndex;
    9631  }
    9632  // We are at the end.
    9633  else
    9634  {
    9635  if(lastOffset < freeSpace1stTo2ndEnd)
    9636  {
    9637  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9638  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9639  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9640  }
    9641 
    9642  // End of loop.
    9643  lastOffset = freeSpace1stTo2ndEnd;
    9644  }
    9645  }
    9646 
    9647  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9648  {
    9649  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9650  while(lastOffset < size)
    9651  {
    9652  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9653  while(nextAlloc2ndIndex != SIZE_MAX &&
    9654  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9655  {
    9656  --nextAlloc2ndIndex;
    9657  }
    9658 
    9659  // Found non-null allocation.
    9660  if(nextAlloc2ndIndex != SIZE_MAX)
    9661  {
    9662  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9663 
    9664  // 1. Process free space before this allocation.
    9665  if(lastOffset < suballoc.offset)
    9666  {
    9667  // There is free space from lastOffset to suballoc.offset.
    9668  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9669  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9670  }
    9671 
    9672  // 2. Process this allocation.
    9673  // There is allocation with suballoc.offset, suballoc.size.
    9674  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9675 
    9676  // 3. Prepare for next iteration.
    9677  lastOffset = suballoc.offset + suballoc.size;
    9678  --nextAlloc2ndIndex;
    9679  }
    9680  // We are at the end.
    9681  else
    9682  {
    9683  if(lastOffset < size)
    9684  {
    9685  // There is free space from lastOffset to size.
    9686  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9687  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9688  }
    9689 
    9690  // End of loop.
    9691  lastOffset = size;
    9692  }
    9693  }
    9694  }
    9695 
    9696  PrintDetailedMap_End(json);
    9697 }
    9698 #endif // #if VMA_STATS_STRING_ENABLED
    9699 
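/*
Note: PrintDetailedMap() produces the per-block part of the JSON statistics
dump. A minimal sketch of obtaining that dump from user code, assuming an
existing `allocator`:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE => include detailed map
    // ... save statsString, e.g. for off-line visualization by VmaDumpVis ...
    vmaFreeStatsString(allocator, statsString);
*/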
    9700 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9701  uint32_t currentFrameIndex,
    9702  uint32_t frameInUseCount,
    9703  VkDeviceSize bufferImageGranularity,
    9704  VkDeviceSize allocSize,
    9705  VkDeviceSize allocAlignment,
    9706  bool upperAddress,
    9707  VmaSuballocationType allocType,
    9708  bool canMakeOtherLost,
    9709  uint32_t strategy,
    9710  VmaAllocationRequest* pAllocationRequest)
    9711 {
    9712  VMA_ASSERT(allocSize > 0);
    9713  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9714  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9715  VMA_HEAVY_ASSERT(Validate());
    9716  return upperAddress ?
    9717  CreateAllocationRequest_UpperAddress(
    9718  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9719  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9720  CreateAllocationRequest_LowerAddress(
    9721  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9722  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9723 }
    9724 
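/*
Note: the upperAddress path is selected with VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
which is only meaningful in a custom pool created with
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT (double stack). A minimal sketch, assuming
`allocator` and such a `pool` already exist:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // allocate from the top

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, VMA_NULL);
*/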
    9725 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    9726  uint32_t currentFrameIndex,
    9727  uint32_t frameInUseCount,
    9728  VkDeviceSize bufferImageGranularity,
    9729  VkDeviceSize allocSize,
    9730  VkDeviceSize allocAlignment,
    9731  VmaSuballocationType allocType,
    9732  bool canMakeOtherLost,
    9733  uint32_t strategy,
    9734  VmaAllocationRequest* pAllocationRequest)
    9735 {
    9736  const VkDeviceSize size = GetSize();
    9737  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9738  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9739 
    9740  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9741  {
    9742  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    9743  return false;
    9744  }
    9745 
    9746  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    9747  if(allocSize > size)
    9748  {
    9749  return false;
    9750  }
    9751  VkDeviceSize resultBaseOffset = size - allocSize;
    9752  if(!suballocations2nd.empty())
    9753  {
    9754  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
     9755  if(allocSize > lastSuballoc.offset)
     9756  {
     9757  return false;
     9758  }
     9759  resultBaseOffset = lastSuballoc.offset - allocSize; // Safe: cannot underflow after the check above.
    9760  }
    9761 
    9762  // Start from offset equal to end of free space.
    9763  VkDeviceSize resultOffset = resultBaseOffset;
    9764 
    9765  // Apply VMA_DEBUG_MARGIN at the end.
    9766  if(VMA_DEBUG_MARGIN > 0)
    9767  {
    9768  if(resultOffset < VMA_DEBUG_MARGIN)
    9769  {
    9770  return false;
    9771  }
    9772  resultOffset -= VMA_DEBUG_MARGIN;
    9773  }
    9774 
    9775  // Apply alignment.
    9776  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    9777 
    9778  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    9779  // Make bigger alignment if necessary.
    9780  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9781  {
    9782  bool bufferImageGranularityConflict = false;
    9783  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9784  {
    9785  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9786  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9787  {
    9788  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    9789  {
    9790  bufferImageGranularityConflict = true;
    9791  break;
    9792  }
    9793  }
    9794  else
    9795  // Already on previous page.
    9796  break;
    9797  }
    9798  if(bufferImageGranularityConflict)
    9799  {
    9800  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    9801  }
    9802  }
    9803 
    9804  // There is enough free space.
    9805  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    9806  suballocations1st.back().offset + suballocations1st.back().size :
    9807  0;
    9808  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    9809  {
    9810  // Check previous suballocations for BufferImageGranularity conflicts.
    9811  // If conflict exists, allocation cannot be made here.
    9812  if(bufferImageGranularity > 1)
    9813  {
    9814  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9815  {
    9816  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9817  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9818  {
    9819  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    9820  {
    9821  return false;
    9822  }
    9823  }
    9824  else
    9825  {
    9826  // Already on next page.
    9827  break;
    9828  }
    9829  }
    9830  }
    9831 
    9832  // All tests passed: Success.
    9833  pAllocationRequest->offset = resultOffset;
    9834  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    9835  pAllocationRequest->sumItemSize = 0;
    9836  // pAllocationRequest->item unused.
    9837  pAllocationRequest->itemsToMakeLostCount = 0;
    9838  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
    9839  return true;
    9840  }
    9841 
    9842  return false;
    9843 }
    9844 
    9845 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    9846  uint32_t currentFrameIndex,
    9847  uint32_t frameInUseCount,
    9848  VkDeviceSize bufferImageGranularity,
    9849  VkDeviceSize allocSize,
    9850  VkDeviceSize allocAlignment,
    9851  VmaSuballocationType allocType,
    9852  bool canMakeOtherLost,
    9853  uint32_t strategy,
    9854  VmaAllocationRequest* pAllocationRequest)
    9855 {
    9856  const VkDeviceSize size = GetSize();
    9857  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9858  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9859 
    9860  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9861  {
    9862  // Try to allocate at the end of 1st vector.
    9863 
    9864  VkDeviceSize resultBaseOffset = 0;
    9865  if(!suballocations1st.empty())
    9866  {
    9867  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    9868  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9869  }
    9870 
    9871  // Start from offset equal to beginning of free space.
    9872  VkDeviceSize resultOffset = resultBaseOffset;
    9873 
    9874  // Apply VMA_DEBUG_MARGIN at the beginning.
    9875  if(VMA_DEBUG_MARGIN > 0)
    9876  {
    9877  resultOffset += VMA_DEBUG_MARGIN;
    9878  }
    9879 
    9880  // Apply alignment.
    9881  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9882 
    9883  // Check previous suballocations for BufferImageGranularity conflicts.
    9884  // Make bigger alignment if necessary.
    9885  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    9886  {
    9887  bool bufferImageGranularityConflict = false;
    9888  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9889  {
    9890  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9891  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9892  {
    9893  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9894  {
    9895  bufferImageGranularityConflict = true;
    9896  break;
    9897  }
    9898  }
    9899  else
    9900  // Already on previous page.
    9901  break;
    9902  }
    9903  if(bufferImageGranularityConflict)
    9904  {
    9905  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9906  }
    9907  }
    9908 
    9909  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    9910  suballocations2nd.back().offset : size;
    9911 
    9912  // There is enough free space at the end after alignment.
    9913  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    9914  {
    9915  // Check next suballocations for BufferImageGranularity conflicts.
    9916  // If conflict exists, allocation cannot be made here.
    9917  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9918  {
    9919  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9920  {
    9921  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9922  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9923  {
    9924  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9925  {
    9926  return false;
    9927  }
    9928  }
    9929  else
    9930  {
    9931  // Already on previous page.
    9932  break;
    9933  }
    9934  }
    9935  }
    9936 
    9937  // All tests passed: Success.
    9938  pAllocationRequest->offset = resultOffset;
    9939  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    9940  pAllocationRequest->sumItemSize = 0;
    9941  // pAllocationRequest->item, customData unused.
    9942  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
    9943  pAllocationRequest->itemsToMakeLostCount = 0;
    9944  return true;
    9945  }
    9946  }
    9947 
    9948  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
    9949  // beginning of 1st vector as the end of free space.
    9950  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9951  {
    9952  VMA_ASSERT(!suballocations1st.empty());
    9953 
    9954  VkDeviceSize resultBaseOffset = 0;
    9955  if(!suballocations2nd.empty())
    9956  {
    9957  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9958  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9959  }
    9960 
    9961  // Start from offset equal to beginning of free space.
    9962  VkDeviceSize resultOffset = resultBaseOffset;
    9963 
    9964  // Apply VMA_DEBUG_MARGIN at the beginning.
    9965  if(VMA_DEBUG_MARGIN > 0)
    9966  {
    9967  resultOffset += VMA_DEBUG_MARGIN;
    9968  }
    9969 
    9970  // Apply alignment.
    9971  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9972 
    9973  // Check previous suballocations for BufferImageGranularity conflicts.
    9974  // Make bigger alignment if necessary.
    9975  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9976  {
    9977  bool bufferImageGranularityConflict = false;
    9978  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    9979  {
    9980  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    9981  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9982  {
    9983  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9984  {
    9985  bufferImageGranularityConflict = true;
    9986  break;
    9987  }
    9988  }
    9989  else
    9990  // Already on previous page.
    9991  break;
    9992  }
    9993  if(bufferImageGranularityConflict)
    9994  {
    9995  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9996  }
    9997  }
    9998 
    9999  pAllocationRequest->itemsToMakeLostCount = 0;
    10000  pAllocationRequest->sumItemSize = 0;
    10001  size_t index1st = m_1stNullItemsBeginCount;
    10002 
    10003  if(canMakeOtherLost)
    10004  {
    10005  while(index1st < suballocations1st.size() &&
    10006  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    10007  {
    10008  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    10009  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10010  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    10011  {
    10012  // No problem.
    10013  }
    10014  else
    10015  {
    10016  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10017  if(suballoc.hAllocation->CanBecomeLost() &&
    10018  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10019  {
    10020  ++pAllocationRequest->itemsToMakeLostCount;
    10021  pAllocationRequest->sumItemSize += suballoc.size;
    10022  }
    10023  else
    10024  {
    10025  return false;
    10026  }
    10027  }
    10028  ++index1st;
    10029  }
    10030 
    10031  // Check next suballocations for BufferImageGranularity conflicts.
    10032  // If conflict exists, we must mark more allocations lost or fail.
    10033  if(bufferImageGranularity > 1)
    10034  {
    10035  while(index1st < suballocations1st.size())
    10036  {
    10037  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10038  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    10039  {
    10040  if(suballoc.hAllocation != VK_NULL_HANDLE)
    10041  {
    10042  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    10043  if(suballoc.hAllocation->CanBecomeLost() &&
    10044  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10045  {
    10046  ++pAllocationRequest->itemsToMakeLostCount;
    10047  pAllocationRequest->sumItemSize += suballoc.size;
    10048  }
    10049  else
    10050  {
    10051  return false;
    10052  }
    10053  }
    10054  }
    10055  else
    10056  {
    10057  // Already on next page.
    10058  break;
    10059  }
    10060  ++index1st;
    10061  }
    10062  }
    10063 
    10064  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
    10065  if(index1st == suballocations1st.size() &&
    10066  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
    10067  {
     10068  // TODO: Known bug: this case is not implemented yet, so the allocation simply fails.
    10069  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
    10070  }
    10071  }
    10072 
    10073  // There is enough free space at the end after alignment.
    10074  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
    10075  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    10076  {
    10077  // Check next suballocations for BufferImageGranularity conflicts.
    10078  // If conflict exists, allocation cannot be made here.
    10079  if(bufferImageGranularity > 1)
    10080  {
    10081  for(size_t nextSuballocIndex = index1st;
    10082  nextSuballocIndex < suballocations1st.size();
    10083  nextSuballocIndex++)
    10084  {
    10085  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    10086  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    10087  {
    10088  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    10089  {
    10090  return false;
    10091  }
    10092  }
    10093  else
    10094  {
    10095  // Already on next page.
    10096  break;
    10097  }
    10098  }
    10099  }
    10100 
    10101  // All tests passed: Success.
    10102  pAllocationRequest->offset = resultOffset;
    10103  pAllocationRequest->sumFreeSize =
    10104  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    10105  - resultBaseOffset
    10106  - pAllocationRequest->sumItemSize;
    10107  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
    10108  // pAllocationRequest->item, customData unused.
    10109  return true;
    10110  }
    10111  }
    10112 
    10113  return false;
    10114 }
    10115 
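/*
Note: the canMakeOtherLost branch above is what turns a linear pool into a ring
buffer: when a new allocation would collide with the oldest ones at the
beginning of the 1st vector, allocations that opted in and have not been used
within the last frameInUseCount frames are marked lost instead of failing the
request. A minimal sketch, assuming `allocator`, a linear `pool`, and a
previously created VmaAllocation `alloc`:

    vmaSetCurrentFrameIndex(allocator, frameIndex); // call once per frame

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;

    // Before reusing an old allocation, verify it is still alive:
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Lost: its memory has been reused; the resource must be recreated.
    }
*/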
    10116 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    10117  uint32_t currentFrameIndex,
    10118  uint32_t frameInUseCount,
    10119  VmaAllocationRequest* pAllocationRequest)
    10120 {
    10121  if(pAllocationRequest->itemsToMakeLostCount == 0)
    10122  {
    10123  return true;
    10124  }
    10125 
    10126  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    10127 
    10128  // We always start from 1st.
    10129  SuballocationVectorType* suballocations = &AccessSuballocations1st();
    10130  size_t index = m_1stNullItemsBeginCount;
    10131  size_t madeLostCount = 0;
    10132  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    10133  {
    10134  if(index == suballocations->size())
    10135  {
    10136  index = 0;
     10137  // If we reach the end of 1st vector, wrap around to the beginning of 2nd vector, or of 1st itself.
    10138  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10139  {
    10140  suballocations = &AccessSuballocations2nd();
    10141  }
    10142  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
    10143  // suballocations continues pointing at AccessSuballocations1st().
    10144  VMA_ASSERT(!suballocations->empty());
    10145  }
    10146  VmaSuballocation& suballoc = (*suballocations)[index];
    10147  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10148  {
    10149  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10150  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10151  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10152  {
    10153  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10154  suballoc.hAllocation = VK_NULL_HANDLE;
    10155  m_SumFreeSize += suballoc.size;
    10156  if(suballocations == &AccessSuballocations1st())
    10157  {
    10158  ++m_1stNullItemsMiddleCount;
    10159  }
    10160  else
    10161  {
    10162  ++m_2ndNullItemsCount;
    10163  }
    10164  ++madeLostCount;
    10165  }
    10166  else
    10167  {
    10168  return false;
    10169  }
    10170  }
    10171  ++index;
    10172  }
    10173 
    10174  CleanupAfterFree();
     10175  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    10176 
    10177  return true;
    10178 }
    10179 
    10180 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10181 {
    10182  uint32_t lostAllocationCount = 0;
    10183 
    10184  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10185  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10186  {
    10187  VmaSuballocation& suballoc = suballocations1st[i];
    10188  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10189  suballoc.hAllocation->CanBecomeLost() &&
    10190  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10191  {
    10192  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10193  suballoc.hAllocation = VK_NULL_HANDLE;
    10194  ++m_1stNullItemsMiddleCount;
    10195  m_SumFreeSize += suballoc.size;
    10196  ++lostAllocationCount;
    10197  }
    10198  }
    10199 
    10200  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10201  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10202  {
    10203  VmaSuballocation& suballoc = suballocations2nd[i];
    10204  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10205  suballoc.hAllocation->CanBecomeLost() &&
    10206  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10207  {
    10208  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10209  suballoc.hAllocation = VK_NULL_HANDLE;
    10210  ++m_2ndNullItemsCount;
    10211  m_SumFreeSize += suballoc.size;
    10212  ++lostAllocationCount;
    10213  }
    10214  }
    10215 
    10216  if(lostAllocationCount)
    10217  {
    10218  CleanupAfterFree();
    10219  }
    10220 
    10221  return lostAllocationCount;
    10222 }
    10223 
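/*
Note: MakeAllocationsLost() is reached through the public helper for custom
pools. A minimal sketch, assuming `allocator` and `pool` exist:

    size_t lostCount = 0;
    vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
    // lostCount allocations were marked lost and their space became free.
*/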
    10224 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10225 {
    10226  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10227  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10228  {
    10229  const VmaSuballocation& suballoc = suballocations1st[i];
    10230  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10231  {
    10232  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10233  {
    10234  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10235  return VK_ERROR_VALIDATION_FAILED_EXT;
    10236  }
    10237  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10238  {
    10239  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10240  return VK_ERROR_VALIDATION_FAILED_EXT;
    10241  }
    10242  }
    10243  }
    10244 
    10245  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10246  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10247  {
    10248  const VmaSuballocation& suballoc = suballocations2nd[i];
    10249  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10250  {
    10251  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10252  {
    10253  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10254  return VK_ERROR_VALIDATION_FAILED_EXT;
    10255  }
    10256  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10257  {
    10258  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10259  return VK_ERROR_VALIDATION_FAILED_EXT;
    10260  }
    10261  }
    10262  }
    10263 
    10264  return VK_SUCCESS;
    10265 }
    10266 
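/*
Note: CheckCorruption() validates the magic values that the library writes into
the VMA_DEBUG_MARGIN region around every allocation, so it only reports
something useful when the implementation is compiled with a nonzero margin and
corruption detection enabled:

    // In the file that defines VMA_IMPLEMENTATION, before the #include:
    //   #define VMA_DEBUG_MARGIN 16
    //   #define VMA_DEBUG_DETECT_CORRUPTION 1

    VkResult res = vmaCheckPoolCorruption(allocator, pool);
    // res is VK_SUCCESS, VK_ERROR_FEATURE_NOT_PRESENT (detection disabled for
    // this memory type), or VK_ERROR_VALIDATION_FAILED_EXT (corruption found).
*/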
    10267 void VmaBlockMetadata_Linear::Alloc(
    10268  const VmaAllocationRequest& request,
    10269  VmaSuballocationType type,
    10270  VkDeviceSize allocSize,
    10271  VmaAllocation hAllocation)
    10272 {
    10273  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10274 
    10275  switch(request.type)
    10276  {
    10277  case VmaAllocationRequestType::UpperAddress:
    10278  {
    10279  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10280  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10281  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10282  suballocations2nd.push_back(newSuballoc);
    10283  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10284  }
    10285  break;
    10286  case VmaAllocationRequestType::EndOf1st:
    10287  {
    10288  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10289 
    10290  VMA_ASSERT(suballocations1st.empty() ||
    10291  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
    10292  // Check if it fits before the end of the block.
    10293  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10294 
    10295  suballocations1st.push_back(newSuballoc);
    10296  }
    10297  break;
    10298  case VmaAllocationRequestType::EndOf2nd:
    10299  {
    10300  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10301  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10302  VMA_ASSERT(!suballocations1st.empty() &&
    10303  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
    10304  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10305 
    10306  switch(m_2ndVectorMode)
    10307  {
    10308  case SECOND_VECTOR_EMPTY:
    10309  // First allocation from second part ring buffer.
    10310  VMA_ASSERT(suballocations2nd.empty());
    10311  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10312  break;
    10313  case SECOND_VECTOR_RING_BUFFER:
    10314  // 2-part ring buffer is already started.
    10315  VMA_ASSERT(!suballocations2nd.empty());
    10316  break;
    10317  case SECOND_VECTOR_DOUBLE_STACK:
    10318  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10319  break;
    10320  default:
    10321  VMA_ASSERT(0);
    10322  }
    10323 
    10324  suballocations2nd.push_back(newSuballoc);
    10325  }
    10326  break;
    10327  default:
    10328  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10329  }
    10330 
    10331  m_SumFreeSize -= newSuballoc.size;
    10332 }
    10333 
    10334 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10335 {
    10336  FreeAtOffset(allocation->GetOffset());
    10337 }
    10338 
    10339 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    10340 {
    10341  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10342  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10343 
    10344  if(!suballocations1st.empty())
    10345  {
    10346  // First allocation: Mark it as next empty at the beginning.
    10347  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    10348  if(firstSuballoc.offset == offset)
    10349  {
    10350  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10351  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    10352  m_SumFreeSize += firstSuballoc.size;
    10353  ++m_1stNullItemsBeginCount;
    10354  CleanupAfterFree();
    10355  return;
    10356  }
    10357  }
    10358 
    10359  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    10360  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    10361  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    10362  {
    10363  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10364  if(lastSuballoc.offset == offset)
    10365  {
    10366  m_SumFreeSize += lastSuballoc.size;
    10367  suballocations2nd.pop_back();
    10368  CleanupAfterFree();
    10369  return;
    10370  }
    10371  }
    10372  // Last allocation in 1st vector.
    10373  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    10374  {
    10375  VmaSuballocation& lastSuballoc = suballocations1st.back();
    10376  if(lastSuballoc.offset == offset)
    10377  {
    10378  m_SumFreeSize += lastSuballoc.size;
    10379  suballocations1st.pop_back();
    10380  CleanupAfterFree();
    10381  return;
    10382  }
    10383  }
    10384 
    10385  // Item from the middle of 1st vector.
    10386  {
    10387  VmaSuballocation refSuballoc;
    10388  refSuballoc.offset = offset;
    10389  // Rest of members stays uninitialized intentionally for better performance.
    10390  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
    10391  suballocations1st.begin() + m_1stNullItemsBeginCount,
    10392  suballocations1st.end(),
    10393  refSuballoc);
    10394  if(it != suballocations1st.end())
    10395  {
    10396  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10397  it->hAllocation = VK_NULL_HANDLE;
    10398  ++m_1stNullItemsMiddleCount;
    10399  m_SumFreeSize += it->size;
    10400  CleanupAfterFree();
    10401  return;
    10402  }
    10403  }
    10404 
    10405  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    10406  {
    10407  // Item from the middle of 2nd vector.
    10408  VmaSuballocation refSuballoc;
    10409  refSuballoc.offset = offset;
    10410  // Rest of members stays uninitialized intentionally for better performance.
    10411  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    10412  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
    10413  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
    10414  if(it != suballocations2nd.end())
    10415  {
    10416  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10417  it->hAllocation = VK_NULL_HANDLE;
    10418  ++m_2ndNullItemsCount;
    10419  m_SumFreeSize += it->size;
    10420  CleanupAfterFree();
    10421  return;
    10422  }
    10423  }
    10424 
    10425  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    10426 }
    10427 
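/*
Note: FreeAtOffset() tries its cases in order of expected frequency: (1) the
oldest allocation at the beginning of the 1st vector, (2) the newest allocation
at the end of the 2nd vector, (3) the newest allocation at the end of the 1st
vector, and only then a binary search (VmaVectorFindSorted) through the middle
of either vector, which merely marks the item free and leaves compaction to
CleanupAfterFree().
*/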
    10428 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10429 {
    10430  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10431  const size_t suballocCount = AccessSuballocations1st().size();
    10432  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10433 }
    10434 
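/*
Note: the condition above says: compact only when the 1st vector holds more
than 32 items and null items outnumber live ones at least 3:2, because
nullItemCount * 2 >= (suballocCount - nullItemCount) * 3 is equivalent to
nullItemCount / nonNullItemCount >= 3/2.
*/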
    10435 void VmaBlockMetadata_Linear::CleanupAfterFree()
    10436 {
    10437  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10438  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10439 
    10440  if(IsEmpty())
    10441  {
    10442  suballocations1st.clear();
    10443  suballocations2nd.clear();
    10444  m_1stNullItemsBeginCount = 0;
    10445  m_1stNullItemsMiddleCount = 0;
    10446  m_2ndNullItemsCount = 0;
    10447  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10448  }
    10449  else
    10450  {
    10451  const size_t suballoc1stCount = suballocations1st.size();
    10452  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10453  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    10454 
    10455  // Find more null items at the beginning of 1st vector.
    10456  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    10457  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10458  {
    10459  ++m_1stNullItemsBeginCount;
    10460  --m_1stNullItemsMiddleCount;
    10461  }
    10462 
    10463  // Find more null items at the end of 1st vector.
    10464  while(m_1stNullItemsMiddleCount > 0 &&
    10465  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    10466  {
    10467  --m_1stNullItemsMiddleCount;
    10468  suballocations1st.pop_back();
    10469  }
    10470 
    10471  // Find more null items at the end of 2nd vector.
    10472  while(m_2ndNullItemsCount > 0 &&
    10473  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    10474  {
    10475  --m_2ndNullItemsCount;
    10476  suballocations2nd.pop_back();
    10477  }
    10478 
    10479  // Find more null items at the beginning of 2nd vector.
    10480  while(m_2ndNullItemsCount > 0 &&
    10481  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
    10482  {
    10483  --m_2ndNullItemsCount;
    10484  suballocations2nd.remove(0);
    10485  }
    10486 
    10487  if(ShouldCompact1st())
    10488  {
    10489  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    10490  size_t srcIndex = m_1stNullItemsBeginCount;
    10491  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    10492  {
    10493  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    10494  {
    10495  ++srcIndex;
    10496  }
    10497  if(dstIndex != srcIndex)
    10498  {
    10499  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    10500  }
    10501  ++srcIndex;
    10502  }
    10503  suballocations1st.resize(nonNullItemCount);
    10504  m_1stNullItemsBeginCount = 0;
    10505  m_1stNullItemsMiddleCount = 0;
    10506  }
    10507 
    10508  // 2nd vector became empty.
    10509  if(suballocations2nd.empty())
    10510  {
    10511  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10512  }
    10513 
    10514  // 1st vector became empty.
    10515  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    10516  {
    10517  suballocations1st.clear();
    10518  m_1stNullItemsBeginCount = 0;
    10519 
    10520  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10521  {
    10522  // Swap 1st with 2nd. Now 2nd is empty.
    10523  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10524  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    10525  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    10526  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10527  {
    10528  ++m_1stNullItemsBeginCount;
    10529  --m_1stNullItemsMiddleCount;
    10530  }
    10531  m_2ndNullItemsCount = 0;
    10532  m_1stVectorIndex ^= 1;
    10533  }
    10534  }
    10535  }
    10536 
    10537  VMA_HEAVY_ASSERT(Validate());
    10538 }
    10539 
    10540 
     10541 ////////////////////////////////////////////////////////////////////////////////
     10542 // class VmaBlockMetadata_Buddy
    10543 
    10544 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    10545  VmaBlockMetadata(hAllocator),
    10546  m_Root(VMA_NULL),
    10547  m_AllocationCount(0),
    10548  m_FreeCount(1),
    10549  m_SumFreeSize(0)
    10550 {
    10551  memset(m_FreeList, 0, sizeof(m_FreeList));
    10552 }
    10553 
    10554 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10555 {
    10556  DeleteNode(m_Root);
    10557 }
    10558 
    10559 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10560 {
    10561  VmaBlockMetadata::Init(size);
    10562 
    10563  m_UsableSize = VmaPrevPow2(size);
    10564  m_SumFreeSize = m_UsableSize;
    10565 
    10566  // Calculate m_LevelCount.
    10567  m_LevelCount = 1;
    10568  while(m_LevelCount < MAX_LEVELS &&
    10569  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10570  {
    10571  ++m_LevelCount;
    10572  }
    10573 
    10574  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10575  rootNode->offset = 0;
    10576  rootNode->type = Node::TYPE_FREE;
    10577  rootNode->parent = VMA_NULL;
    10578  rootNode->buddy = VMA_NULL;
    10579 
    10580  m_Root = rootNode;
    10581  AddToFreeListFront(0, rootNode);
    10582 }
    10583 
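/*
Note: the buddy metadata manages only VmaPrevPow2(size) bytes; the remainder is
unusable and is reported as an extra unused range by the statistics functions
below. For example, a 192 MiB block yields m_UsableSize = 128 MiB and
GetUnusableSize() = 64 MiB. Each level halves the node size (LevelToNodeSize(level)
is m_UsableSize >> level in this implementation), and the loop above stops
growing m_LevelCount once the node size would drop below MIN_NODE_SIZE or
MAX_LEVELS is reached.
*/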
    10584 bool VmaBlockMetadata_Buddy::Validate() const
    10585 {
    10586  // Validate tree.
    10587  ValidationContext ctx;
    10588  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    10589  {
    10590  VMA_VALIDATE(false && "ValidateNode failed.");
    10591  }
    10592  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    10593  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    10594 
    10595  // Validate free node lists.
    10596  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10597  {
    10598  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    10599  m_FreeList[level].front->free.prev == VMA_NULL);
    10600 
    10601  for(Node* node = m_FreeList[level].front;
    10602  node != VMA_NULL;
    10603  node = node->free.next)
    10604  {
    10605  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    10606 
    10607  if(node->free.next == VMA_NULL)
    10608  {
    10609  VMA_VALIDATE(m_FreeList[level].back == node);
    10610  }
    10611  else
    10612  {
    10613  VMA_VALIDATE(node->free.next->free.prev == node);
    10614  }
    10615  }
    10616  }
    10617 
     10618  // Validate that free lists at higher levels are empty.
    10619  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    10620  {
    10621  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    10622  }
    10623 
    10624  return true;
    10625 }
    10626 
    10627 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10628 {
    10629  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10630  {
    10631  if(m_FreeList[level].front != VMA_NULL)
    10632  {
    10633  return LevelToNodeSize(level);
    10634  }
    10635  }
    10636  return 0;
    10637 }
    10638 
    10639 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10640 {
    10641  const VkDeviceSize unusableSize = GetUnusableSize();
    10642 
    10643  outInfo.blockCount = 1;
    10644 
    10645  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10646  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10647 
    10648  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10649  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10650  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10651 
    10652  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10653 
    10654  if(unusableSize > 0)
    10655  {
    10656  ++outInfo.unusedRangeCount;
    10657  outInfo.unusedBytes += unusableSize;
    10658  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10659  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10660  }
    10661 }
    10662 
    10663 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10664 {
    10665  const VkDeviceSize unusableSize = GetUnusableSize();
    10666 
    10667  inoutStats.size += GetSize();
    10668  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10669  inoutStats.allocationCount += m_AllocationCount;
    10670  inoutStats.unusedRangeCount += m_FreeCount;
    10671  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10672 
    10673  if(unusableSize > 0)
    10674  {
    10675  ++inoutStats.unusedRangeCount;
    10676  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10677  }
    10678 }
    10679 
    10680 #if VMA_STATS_STRING_ENABLED
    10681 
    10682 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10683 {
    10684  // TODO optimize
    10685  VmaStatInfo stat;
    10686  CalcAllocationStatInfo(stat);
    10687 
    10688  PrintDetailedMap_Begin(
    10689  json,
    10690  stat.unusedBytes,
    10691  stat.allocationCount,
    10692  stat.unusedRangeCount);
    10693 
    10694  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10695 
    10696  const VkDeviceSize unusableSize = GetUnusableSize();
    10697  if(unusableSize > 0)
    10698  {
    10699  PrintDetailedMap_UnusedRange(json,
    10700  m_UsableSize, // offset
    10701  unusableSize); // size
    10702  }
    10703 
    10704  PrintDetailedMap_End(json);
    10705 }
    10706 
    10707 #endif // #if VMA_STATS_STRING_ENABLED
    10708 
    10709 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10710  uint32_t currentFrameIndex,
    10711  uint32_t frameInUseCount,
    10712  VkDeviceSize bufferImageGranularity,
    10713  VkDeviceSize allocSize,
    10714  VkDeviceSize allocAlignment,
    10715  bool upperAddress,
    10716  VmaSuballocationType allocType,
    10717  bool canMakeOtherLost,
    10718  uint32_t strategy,
    10719  VmaAllocationRequest* pAllocationRequest)
    10720 {
    10721  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10722 
    10723  // Simple way to respect bufferImageGranularity. May be optimized some day.
    10724  // Whenever it might be an OPTIMAL image...
    10725  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10726  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10727  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10728  {
    10729  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10730  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10731  }
    10732 
    10733  if(allocSize > m_UsableSize)
    10734  {
    10735  return false;
    10736  }
    10737 
    10738  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10739  for(uint32_t level = targetLevel + 1; level--; )
    10740  {
    10741  for(Node* freeNode = m_FreeList[level].front;
    10742  freeNode != VMA_NULL;
    10743  freeNode = freeNode->free.next)
    10744  {
    10745  if(freeNode->offset % allocAlignment == 0)
    10746  {
    10747  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    10748  pAllocationRequest->offset = freeNode->offset;
    10749  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10750  pAllocationRequest->sumItemSize = 0;
    10751  pAllocationRequest->itemsToMakeLostCount = 0;
    10752  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10753  return true;
    10754  }
    10755  }
    10756  }
    10757 
    10758  return false;
    10759 }
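/*
Editor's note (illustrative sketch, not part of the library source): the
search above starts at targetLevel - the deepest level whose node size
still fits allocSize - and walks toward the root, because any free node at
a shallower level is larger and can be split down later in Alloc(). The
unsigned loop `for(uint32_t level = targetLevel + 1; level--; )` is
equivalent to:

    for(int32_t level = (int32_t)targetLevel; level >= 0; --level)
        // scan m_FreeList[level] for a node whose offset satisfies
        // freeNode->offset % allocAlignment == 0

For example (assumed sizes), in a block with m_UsableSize = 256 MiB,
LevelToNodeSize(0) = 256 MiB and LevelToNodeSize(5) = 8 MiB, so a 5 MiB
request maps to targetLevel = 5. Only the alignment of the candidate's
offset is checked here; the actual split is deferred to Alloc().
*/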
    10760 
    10761 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10762  uint32_t currentFrameIndex,
    10763  uint32_t frameInUseCount,
    10764  VmaAllocationRequest* pAllocationRequest)
    10765 {
    10766  /*
    10767  Lost allocations are not supported in buddy allocator at the moment.
    10768  Support might be added in the future.
    10769  */
    10770  return pAllocationRequest->itemsToMakeLostCount == 0;
    10771 }
    10772 
    10773 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10774 {
    10775  /*
    10776  Lost allocations are not supported in buddy allocator at the moment.
    10777  Support might be added in the future.
    10778  */
    10779  return 0;
    10780 }
    10781 
    10782 void VmaBlockMetadata_Buddy::Alloc(
    10783  const VmaAllocationRequest& request,
    10784  VmaSuballocationType type,
    10785  VkDeviceSize allocSize,
    10786  VmaAllocation hAllocation)
    10787 {
    10788  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    10789 
    10790  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10791  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    10792 
    10793  Node* currNode = m_FreeList[currLevel].front;
    10794  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10795  while(currNode->offset != request.offset)
    10796  {
    10797  currNode = currNode->free.next;
    10798  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10799  }
    10800 
    10801  // Go down, splitting free nodes.
    10802  while(currLevel < targetLevel)
    10803  {
    10804  // currNode is already first free node at currLevel.
    10805  // Remove it from list of free nodes at this currLevel.
    10806  RemoveFromFreeList(currLevel, currNode);
    10807 
    10808  const uint32_t childrenLevel = currLevel + 1;
    10809 
    10810  // Create two free sub-nodes.
    10811  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
    10812  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
    10813 
    10814  leftChild->offset = currNode->offset;
    10815  leftChild->type = Node::TYPE_FREE;
    10816  leftChild->parent = currNode;
    10817  leftChild->buddy = rightChild;
    10818 
    10819  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
    10820  rightChild->type = Node::TYPE_FREE;
    10821  rightChild->parent = currNode;
    10822  rightChild->buddy = leftChild;
    10823 
    10824  // Convert current currNode to split type.
    10825  currNode->type = Node::TYPE_SPLIT;
    10826  currNode->split.leftChild = leftChild;
    10827 
    10828  // Add child nodes to free list. Order is important!
    10829  AddToFreeListFront(childrenLevel, rightChild);
    10830  AddToFreeListFront(childrenLevel, leftChild);
    10831 
    10832  ++m_FreeCount;
    10833  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
    10834  ++currLevel;
    10835  currNode = m_FreeList[currLevel].front;
    10836 
    10837  /*
    10838  We can be sure that currNode, as the left child of a node previously split,
    10839  also fulfills the alignment requirement.
    10840  */
    10841  }
    10842 
    10843  // Remove from free list.
    10844  VMA_ASSERT(currLevel == targetLevel &&
    10845  currNode != VMA_NULL &&
    10846  currNode->type == Node::TYPE_FREE);
    10847  RemoveFromFreeList(currLevel, currNode);
    10848 
    10849  // Convert to allocation node.
    10850  currNode->type = Node::TYPE_ALLOCATION;
    10851  currNode->allocation.alloc = hAllocation;
    10852 
    10853  ++m_AllocationCount;
    10854  --m_FreeCount;
    10855  m_SumFreeSize -= allocSize;
    10856 }
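/*
Editor's note (illustrative trace, not part of the library source):
continuing the assumed 256 MiB example, allocating at targetLevel = 5 from
a free level-2 node at offset 0 performs three splits:

    level 2: [0 .. 64 MiB)  split into two 32 MiB children
    level 3: [0 .. 32 MiB)  split into two 16 MiB children
    level 4: [0 .. 16 MiB)  split into two  8 MiB children
    level 5: [0 ..  8 MiB)  converted to Node::TYPE_ALLOCATION

Each split removes one free node and adds two (net ++m_FreeCount); the
final conversion does --m_FreeCount and ++m_AllocationCount. The left
child is pushed to the free list last, so m_FreeList[level].front keeps
the lowest, already-aligned offset for the next iteration.
*/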
    10857 
    10858 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10859 {
    10860  if(node->type == Node::TYPE_SPLIT)
    10861  {
    10862  DeleteNode(node->split.leftChild->buddy);
    10863  DeleteNode(node->split.leftChild);
    10864  }
    10865 
    10866  vma_delete(GetAllocationCallbacks(), node);
    10867 }
    10868 
    10869 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10870 {
    10871  VMA_VALIDATE(level < m_LevelCount);
    10872  VMA_VALIDATE(curr->parent == parent);
    10873  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10874  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10875  switch(curr->type)
    10876  {
    10877  case Node::TYPE_FREE:
    10878  // curr->free.prev, next are validated separately.
    10879  ctx.calculatedSumFreeSize += levelNodeSize;
    10880  ++ctx.calculatedFreeCount;
    10881  break;
    10882  case Node::TYPE_ALLOCATION:
    10883  ++ctx.calculatedAllocationCount;
    10884  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10885  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    10886  break;
    10887  case Node::TYPE_SPLIT:
    10888  {
    10889  const uint32_t childrenLevel = level + 1;
    10890  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10891  const Node* const leftChild = curr->split.leftChild;
    10892  VMA_VALIDATE(leftChild != VMA_NULL);
    10893  VMA_VALIDATE(leftChild->offset == curr->offset);
    10894  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10895  {
    10896  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10897  }
    10898  const Node* const rightChild = leftChild->buddy;
    10899  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10900  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10901  {
    10902  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10903  }
    10904  }
    10905  break;
    10906  default:
    10907  return false;
    10908  }
    10909 
    10910  return true;
    10911 }
    10912 
    10913 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10914 {
    10915  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10916  uint32_t level = 0;
    10917  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10918  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10919  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10920  {
    10921  ++level;
    10922  currLevelNodeSize = nextLevelNodeSize;
    10923  nextLevelNodeSize = currLevelNodeSize >> 1;
    10924  }
    10925  return level;
    10926 }
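/*
Editor's note (worked example, not part of the library source): with an
assumed m_UsableSize = 256 MiB and a sufficient m_LevelCount, the loop
above yields the deepest level whose node still fits the request:

    allocSize = 200 MiB -> level 0 (256 MiB node)
    allocSize = 100 MiB -> level 1 (128 MiB node)
    allocSize =   5 MiB -> level 5 (  8 MiB node)

i.e. the largest level such that allocSize <= LevelToNodeSize(level).
*/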
    10927 
    10928 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10929 {
    10930  // Find node and level.
    10931  Node* node = m_Root;
    10932  VkDeviceSize nodeOffset = 0;
    10933  uint32_t level = 0;
    10934  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    10935  while(node->type == Node::TYPE_SPLIT)
    10936  {
    10937  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    10938  if(offset < nodeOffset + nextLevelSize)
    10939  {
    10940  node = node->split.leftChild;
    10941  }
    10942  else
    10943  {
    10944  node = node->split.leftChild->buddy;
    10945  nodeOffset += nextLevelSize;
    10946  }
    10947  ++level;
    10948  levelNodeSize = nextLevelSize;
    10949  }
    10950 
    10951  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    10952  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    10953 
    10954  ++m_FreeCount;
    10955  --m_AllocationCount;
    10956  m_SumFreeSize += alloc->GetSize();
    10957 
    10958  node->type = Node::TYPE_FREE;
    10959 
    10960  // Join free nodes if possible.
    10961  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    10962  {
    10963  RemoveFromFreeList(level, node->buddy);
    10964  Node* const parent = node->parent;
    10965 
    10966  vma_delete(GetAllocationCallbacks(), node->buddy);
    10967  vma_delete(GetAllocationCallbacks(), node);
    10968  parent->type = Node::TYPE_FREE;
    10969 
    10970  node = parent;
    10971  --level;
    10972  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    10973  --m_FreeCount;
    10974  }
    10975 
    10976  AddToFreeListFront(level, node);
    10977 }
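/*
Editor's note (illustrative trace, not part of the library source):
freeing the 8 MiB allocation from the earlier example first descends from
m_Root, picking the left or right child by comparing offset against the
child's range, then merges upward while the buddy is also free:

    level 5: buddy free -> delete both children, parent becomes TYPE_FREE
    level 4: buddy free -> merge again
    level 3: buddy still TYPE_ALLOCATION or TYPE_SPLIT -> stop

Only the final surviving node is put back on a free list, via the single
AddToFreeListFront(level, node) call at the end.
*/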
    10978 
    10979 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    10980 {
    10981  switch(node->type)
    10982  {
    10983  case Node::TYPE_FREE:
    10984  ++outInfo.unusedRangeCount;
    10985  outInfo.unusedBytes += levelNodeSize;
    10986  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    10987  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
    10988  break;
    10989  case Node::TYPE_ALLOCATION:
    10990  {
    10991  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    10992  ++outInfo.allocationCount;
    10993  outInfo.usedBytes += allocSize;
    10994  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    10995  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
    10996 
    10997  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    10998  if(unusedRangeSize > 0)
    10999  {
    11000  ++outInfo.unusedRangeCount;
    11001  outInfo.unusedBytes += unusedRangeSize;
    11002  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    11003  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    11004  }
    11005  }
    11006  break;
    11007  case Node::TYPE_SPLIT:
    11008  {
    11009  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11010  const Node* const leftChild = node->split.leftChild;
    11011  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    11012  const Node* const rightChild = leftChild->buddy;
    11013  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    11014  }
    11015  break;
    11016  default:
    11017  VMA_ASSERT(0);
    11018  }
    11019 }
    11020 
    11021 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    11022 {
    11023  VMA_ASSERT(node->type == Node::TYPE_FREE);
    11024 
    11025  // List is empty.
    11026  Node* const frontNode = m_FreeList[level].front;
    11027  if(frontNode == VMA_NULL)
    11028  {
    11029  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    11030  node->free.prev = node->free.next = VMA_NULL;
    11031  m_FreeList[level].front = m_FreeList[level].back = node;
    11032  }
    11033  else
    11034  {
    11035  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    11036  node->free.prev = VMA_NULL;
    11037  node->free.next = frontNode;
    11038  frontNode->free.prev = node;
    11039  m_FreeList[level].front = node;
    11040  }
    11041 }
    11042 
    11043 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    11044 {
    11045  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    11046 
    11047  // It is at the front.
    11048  if(node->free.prev == VMA_NULL)
    11049  {
    11050  VMA_ASSERT(m_FreeList[level].front == node);
    11051  m_FreeList[level].front = node->free.next;
    11052  }
    11053  else
    11054  {
    11055  Node* const prevFreeNode = node->free.prev;
    11056  VMA_ASSERT(prevFreeNode->free.next == node);
    11057  prevFreeNode->free.next = node->free.next;
    11058  }
    11059 
    11060  // It is at the back.
    11061  if(node->free.next == VMA_NULL)
    11062  {
    11063  VMA_ASSERT(m_FreeList[level].back == node);
    11064  m_FreeList[level].back = node->free.prev;
    11065  }
    11066  else
    11067  {
    11068  Node* const nextFreeNode = node->free.next;
    11069  VMA_ASSERT(nextFreeNode->free.prev == node);
    11070  nextFreeNode->free.prev = node->free.prev;
    11071  }
    11072 }
    11073 
    11074 #if VMA_STATS_STRING_ENABLED
    11075 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    11076 {
    11077  switch(node->type)
    11078  {
    11079  case Node::TYPE_FREE:
    11080  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    11081  break;
    11082  case Node::TYPE_ALLOCATION:
    11083  {
    11084  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    11085  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11086  if(allocSize < levelNodeSize)
    11087  {
    11088  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    11089  }
    11090  }
    11091  break;
    11092  case Node::TYPE_SPLIT:
    11093  {
    11094  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11095  const Node* const leftChild = node->split.leftChild;
    11096  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    11097  const Node* const rightChild = leftChild->buddy;
    11098  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    11099  }
    11100  break;
    11101  default:
    11102  VMA_ASSERT(0);
    11103  }
    11104 }
    11105 #endif // #if VMA_STATS_STRING_ENABLED
    11106 
    11107 
    11108 ////////////////////////////////////////////////////////////////////////////////
    11109 // class VmaDeviceMemoryBlock
    11110 
    11111 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    11112  m_pMetadata(VMA_NULL),
    11113  m_MemoryTypeIndex(UINT32_MAX),
    11114  m_Id(0),
    11115  m_hMemory(VK_NULL_HANDLE),
    11116  m_MapCount(0),
    11117  m_pMappedData(VMA_NULL)
    11118 {
    11119 }
    11120 
    11121 void VmaDeviceMemoryBlock::Init(
    11122  VmaAllocator hAllocator,
    11123  VmaPool hParentPool,
    11124  uint32_t newMemoryTypeIndex,
    11125  VkDeviceMemory newMemory,
    11126  VkDeviceSize newSize,
    11127  uint32_t id,
    11128  uint32_t algorithm)
    11129 {
    11130  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11131 
    11132  m_hParentPool = hParentPool;
    11133  m_MemoryTypeIndex = newMemoryTypeIndex;
    11134  m_Id = id;
    11135  m_hMemory = newMemory;
    11136 
    11137  switch(algorithm)
    11138  {
    11139  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
    11140  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11141  break;
    11142  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
    11143  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11144  break;
    11145  default:
    11146  VMA_ASSERT(0);
    11147  // Fall-through.
    11148  case 0:
    11149  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11150  }
    11151  m_pMetadata->Init(newSize);
    11152 }
    11153 
    11154 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    11155 {
    11156  // This is the most important assert in the entire library.
    11157  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    11158  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    11159 
    11160  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    11161  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    11162  m_hMemory = VK_NULL_HANDLE;
    11163 
    11164  vma_delete(allocator, m_pMetadata);
    11165  m_pMetadata = VMA_NULL;
    11166 }
    11167 
    11168 bool VmaDeviceMemoryBlock::Validate() const
    11169 {
    11170  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11171  (m_pMetadata->GetSize() != 0));
    11172 
    11173  return m_pMetadata->Validate();
    11174 }
    11175 
    11176 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11177 {
    11178  void* pData = nullptr;
    11179  VkResult res = Map(hAllocator, 1, &pData);
    11180  if(res != VK_SUCCESS)
    11181  {
    11182  return res;
    11183  }
    11184 
    11185  res = m_pMetadata->CheckCorruption(pData);
    11186 
    11187  Unmap(hAllocator, 1);
    11188 
    11189  return res;
    11190 }
    11191 
    11192 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11193 {
    11194  if(count == 0)
    11195  {
    11196  return VK_SUCCESS;
    11197  }
    11198 
    11199  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11200  if(m_MapCount != 0)
    11201  {
    11202  m_MapCount += count;
    11203  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11204  if(ppData != VMA_NULL)
    11205  {
    11206  *ppData = m_pMappedData;
    11207  }
    11208  return VK_SUCCESS;
    11209  }
    11210  else
    11211  {
    11212  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11213  hAllocator->m_hDevice,
    11214  m_hMemory,
    11215  0, // offset
    11216  VK_WHOLE_SIZE,
    11217  0, // flags
    11218  &m_pMappedData);
    11219  if(result == VK_SUCCESS)
    11220  {
    11221  if(ppData != VMA_NULL)
    11222  {
    11223  *ppData = m_pMappedData;
    11224  }
    11225  m_MapCount = count;
    11226  }
    11227  return result;
    11228  }
    11229 }
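/*
Editor's note (usage sketch, not part of the library source): Map()/Unmap()
reference-count a single vkMapMemory of the whole block, so mapping several
allocations that share one VkDeviceMemory is cheap and thread-safe:

    void* p1 = VMA_NULL;  // hypothetical local variables
    void* p2 = VMA_NULL;
    pBlock->Map(hAllocator, 1, &p1);   // m_MapCount 0 -> 1, calls vkMapMemory
    pBlock->Map(hAllocator, 1, &p2);   // m_MapCount 1 -> 2, reuses m_pMappedData
    // here p1 == p2
    pBlock->Unmap(hAllocator, 1);      // m_MapCount 2 -> 1
    pBlock->Unmap(hAllocator, 1);      // m_MapCount 1 -> 0, calls vkUnmapMemory
*/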
    11230 
    11231 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11232 {
    11233  if(count == 0)
    11234  {
    11235  return;
    11236  }
    11237 
    11238  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11239  if(m_MapCount >= count)
    11240  {
    11241  m_MapCount -= count;
    11242  if(m_MapCount == 0)
    11243  {
    11244  m_pMappedData = VMA_NULL;
    11245  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11246  }
    11247  }
    11248  else
    11249  {
    11250  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11251  }
    11252 }
    11253 
    11254 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11255 {
    11256  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11257  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11258 
    11259  void* pData;
    11260  VkResult res = Map(hAllocator, 1, &pData);
    11261  if(res != VK_SUCCESS)
    11262  {
    11263  return res;
    11264  }
    11265 
    11266  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11267  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11268 
    11269  Unmap(hAllocator, 1);
    11270 
    11271  return VK_SUCCESS;
    11272 }
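/*
Editor's note (illustrative layout, not part of the library source): with
an assumed VMA_DEBUG_MARGIN = 16, the two VmaWriteMagicValue() calls above
bracket the allocation with 16-byte stripes of the magic value:

    [allocOffset - 16, allocOffset)                          magic values
    [allocOffset, allocOffset + allocSize)                   user data
    [allocOffset + allocSize, allocOffset + allocSize + 16)  magic values

ValidateMagicValueAroundAllocation() below re-reads both stripes when the
allocation is freed and asserts if either was overwritten.
*/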
    11273 
    11274 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11275 {
    11276  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11277  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11278 
    11279  void* pData;
    11280  VkResult res = Map(hAllocator, 1, &pData);
    11281  if(res != VK_SUCCESS)
    11282  {
    11283  return res;
    11284  }
    11285 
    11286  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11287  {
    11288  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11289  }
    11290  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11291  {
    11292  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11293  }
    11294 
    11295  Unmap(hAllocator, 1);
    11296 
    11297  return VK_SUCCESS;
    11298 }
    11299 
    11300 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11301  const VmaAllocator hAllocator,
    11302  const VmaAllocation hAllocation,
    11303  VkBuffer hBuffer)
    11304 {
    11305  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11306  hAllocation->GetBlock() == this);
    11307  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11308  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11309  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11310  hAllocator->m_hDevice,
    11311  hBuffer,
    11312  m_hMemory,
    11313  hAllocation->GetOffset());
    11314 }
    11315 
    11316 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11317  const VmaAllocator hAllocator,
    11318  const VmaAllocation hAllocation,
    11319  VkImage hImage)
    11320 {
    11321  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11322  hAllocation->GetBlock() == this);
    11323  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11324  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11325  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11326  hAllocator->m_hDevice,
    11327  hImage,
    11328  m_hMemory,
    11329  hAllocation->GetOffset());
    11330 }
    11331 
    11332 static void InitStatInfo(VmaStatInfo& outInfo)
    11333 {
    11334  memset(&outInfo, 0, sizeof(outInfo));
    11335  outInfo.allocationSizeMin = UINT64_MAX;
    11336  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11337 }
    11338 
    11339 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11340 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11341 {
    11342  inoutInfo.blockCount += srcInfo.blockCount;
    11343  inoutInfo.allocationCount += srcInfo.allocationCount;
    11344  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11345  inoutInfo.usedBytes += srcInfo.usedBytes;
    11346  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11347  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11348  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11349  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11350  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11351 }
    11352 
    11353 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11354 {
    11355  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11356  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11357  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11358  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11359 }
    11360 
    11361 VmaPool_T::VmaPool_T(
    11362  VmaAllocator hAllocator,
    11363  const VmaPoolCreateInfo& createInfo,
    11364  VkDeviceSize preferredBlockSize) :
    11365  m_BlockVector(
    11366  hAllocator,
    11367  this, // hParentPool
    11368  createInfo.memoryTypeIndex,
    11369  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    11370  createInfo.minBlockCount,
    11371  createInfo.maxBlockCount,
    11372  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    11373  createInfo.frameInUseCount,
    11374  true, // isCustomPool
    11375  createInfo.blockSize != 0, // explicitBlockSize
    11376  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    11377  m_Id(0)
    11378 {
    11379 }
    11380 
    11381 VmaPool_T::~VmaPool_T()
    11382 {
    11383 }
    11384 
    11385 #if VMA_STATS_STRING_ENABLED
    11386 
    11387 #endif // #if VMA_STATS_STRING_ENABLED
    11388 
    11389 VmaBlockVector::VmaBlockVector(
    11390  VmaAllocator hAllocator,
    11391  VmaPool hParentPool,
    11392  uint32_t memoryTypeIndex,
    11393  VkDeviceSize preferredBlockSize,
    11394  size_t minBlockCount,
    11395  size_t maxBlockCount,
    11396  VkDeviceSize bufferImageGranularity,
    11397  uint32_t frameInUseCount,
    11398  bool isCustomPool,
    11399  bool explicitBlockSize,
    11400  uint32_t algorithm) :
    11401  m_hAllocator(hAllocator),
    11402  m_hParentPool(hParentPool),
    11403  m_MemoryTypeIndex(memoryTypeIndex),
    11404  m_PreferredBlockSize(preferredBlockSize),
    11405  m_MinBlockCount(minBlockCount),
    11406  m_MaxBlockCount(maxBlockCount),
    11407  m_BufferImageGranularity(bufferImageGranularity),
    11408  m_FrameInUseCount(frameInUseCount),
    11409  m_IsCustomPool(isCustomPool),
    11410  m_ExplicitBlockSize(explicitBlockSize),
    11411  m_Algorithm(algorithm),
    11412  m_HasEmptyBlock(false),
    11413  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    11414  m_NextBlockId(0)
    11415 {
    11416 }
    11417 
    11418 VmaBlockVector::~VmaBlockVector()
    11419 {
    11420  for(size_t i = m_Blocks.size(); i--; )
    11421  {
    11422  m_Blocks[i]->Destroy(m_hAllocator);
    11423  vma_delete(m_hAllocator, m_Blocks[i]);
    11424  }
    11425 }
    11426 
    11427 VkResult VmaBlockVector::CreateMinBlocks()
    11428 {
    11429  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11430  {
    11431  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11432  if(res != VK_SUCCESS)
    11433  {
    11434  return res;
    11435  }
    11436  }
    11437  return VK_SUCCESS;
    11438 }
    11439 
    11440 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11441 {
    11442  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11443 
    11444  const size_t blockCount = m_Blocks.size();
    11445 
    11446  pStats->size = 0;
    11447  pStats->unusedSize = 0;
    11448  pStats->allocationCount = 0;
    11449  pStats->unusedRangeCount = 0;
    11450  pStats->unusedRangeSizeMax = 0;
    11451  pStats->blockCount = blockCount;
    11452 
    11453  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11454  {
    11455  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11456  VMA_ASSERT(pBlock);
    11457  VMA_HEAVY_ASSERT(pBlock->Validate());
    11458  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11459  }
    11460 }
    11461 
    11462 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11463 {
    11464  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11465  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11466  (VMA_DEBUG_MARGIN > 0) &&
    11467  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11468  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11469 }
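/*
Editor's note (summary, not part of the library source): all conditions
above must hold at once - corruption detection compiled in
(VMA_DEBUG_DETECT_CORRUPTION), a nonzero VMA_DEBUG_MARGIN to hold the
magic values, the default or linear block algorithm, and memory that is
both HOST_VISIBLE and HOST_COHERENT so the margins can be written and
re-read through a plain CPU mapping without explicit flushes.
*/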
    11470 
    11471 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11472 
    11473 VkResult VmaBlockVector::Allocate(
    11474  uint32_t currentFrameIndex,
    11475  VkDeviceSize size,
    11476  VkDeviceSize alignment,
    11477  const VmaAllocationCreateInfo& createInfo,
    11478  VmaSuballocationType suballocType,
    11479  size_t allocationCount,
    11480  VmaAllocation* pAllocations)
    11481 {
    11482  size_t allocIndex;
    11483  VkResult res = VK_SUCCESS;
    11484 
    11485  if(IsCorruptionDetectionEnabled())
    11486  {
    11487  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11488  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11489  }
    11490 
    11491  {
    11492  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11493  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11494  {
    11495  res = AllocatePage(
    11496  currentFrameIndex,
    11497  size,
    11498  alignment,
    11499  createInfo,
    11500  suballocType,
    11501  pAllocations + allocIndex);
    11502  if(res != VK_SUCCESS)
    11503  {
    11504  break;
    11505  }
    11506  }
    11507  }
    11508 
    11509  if(res != VK_SUCCESS)
    11510  {
    11511  // Free all already created allocations.
    11512  while(allocIndex--)
    11513  {
    11514  Free(pAllocations[allocIndex]);
    11515  }
    11516  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11517  }
    11518 
    11519  return res;
    11520 }
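/*
Editor's note (behavioral summary, not part of the library source):
Allocate() is all-or-nothing over allocationCount pages. On the first
AllocatePage() failure it frees every page created so far and zeroes the
whole pAllocations array, so a caller never observes a partially filled
result:

    VmaAllocation allocs[4] = {};  // hypothetical usage
    VkResult res = blockVector.Allocate(frameIndex, size, alignment,
        createInfo, suballocType, 4, allocs);
    // res != VK_SUCCESS  =>  allocs[0..3] are all null
*/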
    11521 
    11522 VkResult VmaBlockVector::AllocatePage(
    11523  uint32_t currentFrameIndex,
    11524  VkDeviceSize size,
    11525  VkDeviceSize alignment,
    11526  const VmaAllocationCreateInfo& createInfo,
    11527  VmaSuballocationType suballocType,
    11528  VmaAllocation* pAllocation)
    11529 {
    11530  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11531  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11532  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11533  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11534  const bool canCreateNewBlock =
    11535  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11536  (m_Blocks.size() < m_MaxBlockCount);
    11537  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11538 
    11539  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11540  // Which in turn is available only when maxBlockCount = 1.
    11541  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11542  {
    11543  canMakeOtherLost = false;
    11544  }
    11545 
    11546  // Upper address can only be used with linear allocator and within single memory block.
    11547  if(isUpperAddress &&
    11548  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11549  {
    11550  return VK_ERROR_FEATURE_NOT_PRESENT;
    11551  }
    11552 
    11553  // Validate strategy.
    11554  switch(strategy)
    11555  {
    11556  case 0:
    11557  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
    11558  break;
    11559  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
    11560  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
    11561  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
    11562  break;
    11563  default:
    11564  return VK_ERROR_FEATURE_NOT_PRESENT;
    11565  }
    11566 
    11567  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    11568  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11569  {
    11570  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11571  }
    11572 
    11573  /*
    11574  Under certain conditions, this whole section can be skipped for optimization, so
    11575  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11576  e.g. for custom pools with linear algorithm.
    11577  */
    11578  if(!canMakeOtherLost || canCreateNewBlock)
    11579  {
    11580  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11581  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    11582  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    11583 
    11584  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11585  {
    11586  // Use only last block.
    11587  if(!m_Blocks.empty())
    11588  {
    11589  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11590  VMA_ASSERT(pCurrBlock);
    11591  VkResult res = AllocateFromBlock(
    11592  pCurrBlock,
    11593  currentFrameIndex,
    11594  size,
    11595  alignment,
    11596  allocFlagsCopy,
    11597  createInfo.pUserData,
    11598  suballocType,
    11599  strategy,
    11600  pAllocation);
    11601  if(res == VK_SUCCESS)
    11602  {
    11603  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11604  return VK_SUCCESS;
    11605  }
    11606  }
    11607  }
    11608  else
    11609  {
    11610  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11611  {
    11612  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11613  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11614  {
    11615  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11616  VMA_ASSERT(pCurrBlock);
    11617  VkResult res = AllocateFromBlock(
    11618  pCurrBlock,
    11619  currentFrameIndex,
    11620  size,
    11621  alignment,
    11622  allocFlagsCopy,
    11623  createInfo.pUserData,
    11624  suballocType,
    11625  strategy,
    11626  pAllocation);
    11627  if(res == VK_SUCCESS)
    11628  {
    11629  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11630  return VK_SUCCESS;
    11631  }
    11632  }
    11633  }
    11634  else // WORST_FIT, FIRST_FIT
    11635  {
    11636  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11637  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11638  {
    11639  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11640  VMA_ASSERT(pCurrBlock);
    11641  VkResult res = AllocateFromBlock(
    11642  pCurrBlock,
    11643  currentFrameIndex,
    11644  size,
    11645  alignment,
    11646  allocFlagsCopy,
    11647  createInfo.pUserData,
    11648  suballocType,
    11649  strategy,
    11650  pAllocation);
    11651  if(res == VK_SUCCESS)
    11652  {
    11653  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11654  return VK_SUCCESS;
    11655  }
    11656  }
    11657  }
    11658  }
    11659 
    11660  // 2. Try to create new block.
    11661  if(canCreateNewBlock)
    11662  {
    11663  // Calculate optimal size for new block.
    11664  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11665  uint32_t newBlockSizeShift = 0;
    11666  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11667 
    11668  if(!m_ExplicitBlockSize)
    11669  {
    11670  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11671  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11672  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11673  {
    11674  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11675  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11676  {
    11677  newBlockSize = smallerNewBlockSize;
    11678  ++newBlockSizeShift;
    11679  }
    11680  else
    11681  {
    11682  break;
    11683  }
    11684  }
    11685  }
    11686 
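/*
Editor's note (worked example, not part of the library source): with an
assumed m_PreferredBlockSize = 256 MiB, no existing blocks and a 10 MiB
request, the heuristic above settles on a 32 MiB first block:

    256 -> 128 -> 64 -> 32 MiB   (NEW_BLOCK_SIZE_SHIFT_MAX = 3 halvings,
                                  each kept only while >= 2 * size)

so small workloads do not immediately commit a full preferred-size block.
The retry loop below halves further only if vkAllocateMemory itself fails.
*/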
    11687  size_t newBlockIndex = 0;
    11688  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11689  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11690  if(!m_ExplicitBlockSize)
    11691  {
    11692  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11693  {
    11694  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11695  if(smallerNewBlockSize >= size)
    11696  {
    11697  newBlockSize = smallerNewBlockSize;
    11698  ++newBlockSizeShift;
    11699  res = CreateBlock(newBlockSize, &newBlockIndex);
    11700  }
    11701  else
    11702  {
    11703  break;
    11704  }
    11705  }
    11706  }
    11707 
    11708  if(res == VK_SUCCESS)
    11709  {
    11710  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11711  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11712 
    11713  res = AllocateFromBlock(
    11714  pBlock,
    11715  currentFrameIndex,
    11716  size,
    11717  alignment,
    11718  allocFlagsCopy,
    11719  createInfo.pUserData,
    11720  suballocType,
    11721  strategy,
    11722  pAllocation);
    11723  if(res == VK_SUCCESS)
    11724  {
    11725  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11726  return VK_SUCCESS;
    11727  }
    11728  else
    11729  {
    11730  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11731  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11732  }
    11733  }
    11734  }
    11735  }
    11736 
    11737  // 3. Try to allocate from existing blocks with making other allocations lost.
    11738  if(canMakeOtherLost)
    11739  {
    11740  uint32_t tryIndex = 0;
    11741  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11742  {
    11743  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11744  VmaAllocationRequest bestRequest = {};
    11745  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11746 
    11747  // 1. Search existing allocations.
    11748  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11749  {
    11750  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11751  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11752  {
    11753  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11754  VMA_ASSERT(pCurrBlock);
    11755  VmaAllocationRequest currRequest = {};
    11756  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11757  currentFrameIndex,
    11758  m_FrameInUseCount,
    11759  m_BufferImageGranularity,
    11760  size,
    11761  alignment,
    11762  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11763  suballocType,
    11764  canMakeOtherLost,
    11765  strategy,
    11766  &currRequest))
    11767  {
    11768  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11769  if(pBestRequestBlock == VMA_NULL ||
    11770  currRequestCost < bestRequestCost)
    11771  {
    11772  pBestRequestBlock = pCurrBlock;
    11773  bestRequest = currRequest;
    11774  bestRequestCost = currRequestCost;
    11775 
    11776  if(bestRequestCost == 0)
    11777  {
    11778  break;
    11779  }
    11780  }
    11781  }
    11782  }
    11783  }
    11784  else // WORST_FIT, FIRST_FIT
    11785  {
    11786  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11787  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11788  {
    11789  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11790  VMA_ASSERT(pCurrBlock);
    11791  VmaAllocationRequest currRequest = {};
    11792  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11793  currentFrameIndex,
    11794  m_FrameInUseCount,
    11795  m_BufferImageGranularity,
    11796  size,
    11797  alignment,
    11798  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11799  suballocType,
    11800  canMakeOtherLost,
    11801  strategy,
    11802  &currRequest))
    11803  {
    11804  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11805  if(pBestRequestBlock == VMA_NULL ||
    11806  currRequestCost < bestRequestCost ||
    11807  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11808  {
    11809  pBestRequestBlock = pCurrBlock;
    11810  bestRequest = currRequest;
    11811  bestRequestCost = currRequestCost;
    11812 
    11813  if(bestRequestCost == 0 ||
    11814  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11815  {
    11816  break;
    11817  }
    11818  }
    11819  }
    11820  }
    11821  }
    11822 
    11823  if(pBestRequestBlock != VMA_NULL)
    11824  {
    11825  if(mapped)
    11826  {
    11827  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11828  if(res != VK_SUCCESS)
    11829  {
    11830  return res;
    11831  }
    11832  }
    11833 
    11834  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11835  currentFrameIndex,
    11836  m_FrameInUseCount,
    11837  &bestRequest))
    11838  {
    11839  // We no longer have an empty Allocation.
    11840  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11841  {
    11842  m_HasEmptyBlock = false;
    11843  }
    11844  // Allocate from this pBlock.
    11845  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11846  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11847  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11848  (*pAllocation)->InitBlockAllocation(
    11849  pBestRequestBlock,
    11850  bestRequest.offset,
    11851  alignment,
    11852  size,
    11853  suballocType,
    11854  mapped,
    11855  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11856  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11857  VMA_DEBUG_LOG(" Returned from existing block");
    11858  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11859  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11860  {
    11861  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11862  }
    11863  if(IsCorruptionDetectionEnabled())
    11864  {
    11865  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11866  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11867  }
    11868  return VK_SUCCESS;
    11869  }
    11870  // else: Some allocations must have been touched while we are here. Next try.
    11871  }
    11872  else
    11873  {
    11874  // Could not find place in any of the blocks - break outer loop.
    11875  break;
    11876  }
    11877  }
    11878  /* Maximum number of tries exceeded - a very unlikely event when many other
    11879  threads are simultaneously touching allocations, making it impossible to make
    11880  them lost at the same time as we try to allocate. */
    11881  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11882  {
    11883  return VK_ERROR_TOO_MANY_OBJECTS;
    11884  }
    11885  }
    11886 
    11887  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11888 }
    11889 
    11890 void VmaBlockVector::Free(
    11891  VmaAllocation hAllocation)
    11892 {
    11893  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11894 
    11895  // Scope for lock.
    11896  {
    11897  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11898 
    11899  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11900 
    11901  if(IsCorruptionDetectionEnabled())
    11902  {
    11903  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11904  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11905  }
    11906 
    11907  if(hAllocation->IsPersistentMap())
    11908  {
    11909  pBlock->Unmap(m_hAllocator, 1);
    11910  }
    11911 
    11912  pBlock->m_pMetadata->Free(hAllocation);
    11913  VMA_HEAVY_ASSERT(pBlock->Validate());
    11914 
    11915  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    11916 
    11917  // pBlock became empty after this deallocation.
    11918  if(pBlock->m_pMetadata->IsEmpty())
    11919  {
    11920  // Already has empty Allocation. We don't want to have two, so delete this one.
    11921  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11922  {
    11923  pBlockToDelete = pBlock;
    11924  Remove(pBlock);
    11925  }
    11926  // We now have first empty block.
    11927  else
    11928  {
    11929  m_HasEmptyBlock = true;
    11930  }
    11931  }
    11932  // pBlock didn't become empty, but we have another empty block - find and free that one.
    11933  // (This is optional, heuristics.)
    11934  else if(m_HasEmptyBlock)
    11935  {
    11936  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    11937  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    11938  {
    11939  pBlockToDelete = pLastBlock;
    11940  m_Blocks.pop_back();
    11941  m_HasEmptyBlock = false;
    11942  }
    11943  }
    11944 
    11945  IncrementallySortBlocks();
    11946  }
    11947 
    11948  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    11949  // lock, for performance reasons.
    11950  if(pBlockToDelete != VMA_NULL)
    11951  {
    11952  VMA_DEBUG_LOG(" Deleted empty allocation");
    11953  pBlockToDelete->Destroy(m_hAllocator);
    11954  vma_delete(m_hAllocator, pBlockToDelete);
    11955  }
    11956 }
    11957 
    11958 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    11959 {
    11960  VkDeviceSize result = 0;
    11961  for(size_t i = m_Blocks.size(); i--; )
    11962  {
    11963  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    11964  if(result >= m_PreferredBlockSize)
    11965  {
    11966  break;
    11967  }
    11968  }
    11969  return result;
    11970 }
    11971 
    11972 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    11973 {
    11974  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11975  {
    11976  if(m_Blocks[blockIndex] == pBlock)
    11977  {
    11978  VmaVectorRemove(m_Blocks, blockIndex);
    11979  return;
    11980  }
    11981  }
    11982  VMA_ASSERT(0);
    11983 }
    11984 
    11985 void VmaBlockVector::IncrementallySortBlocks()
    11986 {
    11987  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11988  {
    11989  // Bubble sort only until first swap.
    11990  for(size_t i = 1; i < m_Blocks.size(); ++i)
    11991  {
    11992  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    11993  {
    11994  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    11995  return;
    11996  }
    11997  }
    11998  }
    11999 }
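/*
Editor's note (illustrative trace, not part of the library source): one
bubble-sort pass that stops at the first swap amortizes sorting across
calls. E.g. with per-block free sizes {30, 10, 20}:

    call 1: swaps 30 and 10   -> {10, 30, 20}
    call 2: swaps 30 and 20   -> {10, 20, 30}

Each call is O(n) with at most one swap, keeping m_Blocks approximately
sorted by ascending free space so allocation tries the fullest blocks first.
*/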
    12000 
    12001 VkResult VmaBlockVector::AllocateFromBlock(
    12002  VmaDeviceMemoryBlock* pBlock,
    12003  uint32_t currentFrameIndex,
    12004  VkDeviceSize size,
    12005  VkDeviceSize alignment,
    12006  VmaAllocationCreateFlags allocFlags,
    12007  void* pUserData,
    12008  VmaSuballocationType suballocType,
    12009  uint32_t strategy,
    12010  VmaAllocation* pAllocation)
    12011 {
    12012  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    12013  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    12014  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    12015  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    12016 
    12017  VmaAllocationRequest currRequest = {};
    12018  if(pBlock->m_pMetadata->CreateAllocationRequest(
    12019  currentFrameIndex,
    12020  m_FrameInUseCount,
    12021  m_BufferImageGranularity,
    12022  size,
    12023  alignment,
    12024  isUpperAddress,
    12025  suballocType,
    12026  false, // canMakeOtherLost
    12027  strategy,
    12028  &currRequest))
    12029  {
    12030  // Allocate from pCurrBlock.
    12031  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    12032 
    12033  if(mapped)
    12034  {
    12035  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    12036  if(res != VK_SUCCESS)
    12037  {
    12038  return res;
    12039  }
    12040  }
    12041 
    12042  // We no longer have an empty Allocation.
    12043  if(pBlock->m_pMetadata->IsEmpty())
    12044  {
    12045  m_HasEmptyBlock = false;
    12046  }
    12047 
    12048  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    12049  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    12050  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
    12051  (*pAllocation)->InitBlockAllocation(
    12052  pBlock,
    12053  currRequest.offset,
    12054  alignment,
    12055  size,
    12056  suballocType,
    12057  mapped,
    12058  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    12059  VMA_HEAVY_ASSERT(pBlock->Validate());
    12060  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    12061  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12062  {
    12063  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12064  }
    12065  if(IsCorruptionDetectionEnabled())
    12066  {
    12067  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    12068  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    12069  }
    12070  return VK_SUCCESS;
    12071  }
    12072  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12073 }
    12074 
    12075 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    12076 {
    12077  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12078  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    12079  allocInfo.allocationSize = blockSize;
    12080  VkDeviceMemory mem = VK_NULL_HANDLE;
    12081  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    12082  if(res < 0)
    12083  {
    12084  return res;
    12085  }
    12086 
    12087  // New VkDeviceMemory successfully created.
    12088 
    12089  // Create new Allocation for it.
    12090  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    12091  pBlock->Init(
    12092  m_hAllocator,
    12093  m_hParentPool,
    12094  m_MemoryTypeIndex,
    12095  mem,
    12096  allocInfo.allocationSize,
    12097  m_NextBlockId++,
    12098  m_Algorithm);
    12099 
    12100  m_Blocks.push_back(pBlock);
    12101  if(pNewBlockIndex != VMA_NULL)
    12102  {
    12103  *pNewBlockIndex = m_Blocks.size() - 1;
    12104  }
    12105 
    12106  return VK_SUCCESS;
    12107 }
    12108 
    12109 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    12110  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12111  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    12112 {
    12113  const size_t blockCount = m_Blocks.size();
    12114  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    12115 
    12116  enum BLOCK_FLAG
    12117  {
    12118  BLOCK_FLAG_USED = 0x00000001,
    12119  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    12120  };
    12121 
    12122  struct BlockInfo
    12123  {
    12124  uint32_t flags;
    12125  void* pMappedData;
    12126  };
    12127  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    12128  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    12129  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    12130 
    12131  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12132  const size_t moveCount = moves.size();
    12133  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12134  {
    12135  const VmaDefragmentationMove& move = moves[moveIndex];
    12136  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    12137  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    12138  }
    12139 
    12140  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12141 
    12142  // Go over all blocks. Get mapped pointer or map if necessary.
    12143  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12144  {
    12145  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12146  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12147  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    12148  {
    12149  currBlockInfo.pMappedData = pBlock->GetMappedData();
    12150  // It is not originally mapped - map it.
    12151  if(currBlockInfo.pMappedData == VMA_NULL)
    12152  {
    12153  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    12154  if(pDefragCtx->res == VK_SUCCESS)
    12155  {
    12156  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    12157  }
    12158  }
    12159  }
    12160  }
    12161 
    12162  // Go over all moves. Do actual data transfer.
    12163  if(pDefragCtx->res == VK_SUCCESS)
    12164  {
    12165  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12166  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12167 
    12168  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12169  {
    12170  const VmaDefragmentationMove& move = moves[moveIndex];
    12171 
    12172  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12173  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12174 
    12175  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12176 
    12177  // Invalidate source.
    12178  if(isNonCoherent)
    12179  {
    12180  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12181  memRange.memory = pSrcBlock->GetDeviceMemory();
    12182  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12183  memRange.size = VMA_MIN(
    12184  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12185  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12186  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12187  }
    12188 
    12189  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12190  memmove(
    12191  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12192  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12193  static_cast<size_t>(move.size));
    12194 
    12195  if(IsCorruptionDetectionEnabled())
    12196  {
    12197  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12198  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12199  }
    12200 
    12201  // Flush destination.
    12202  if(isNonCoherent)
    12203  {
    12204  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12205  memRange.memory = pDstBlock->GetDeviceMemory();
    12206  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12207  memRange.size = VMA_MIN(
    12208  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12209  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12210  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12211  }
    12212  }
    12213  }
    12214 
    12215  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    12216  // This runs regardless of whether pDefragCtx->res == VK_SUCCESS.
    12217  for(size_t blockIndex = blockCount; blockIndex--; )
    12218  {
    12219  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12220  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12221  {
    12222  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12223  pBlock->Unmap(m_hAllocator, 1);
    12224  }
    12225  }
    12226 }
    12227 
    12228 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12229  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12230  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12231  VkCommandBuffer commandBuffer)
    12232 {
    12233  const size_t blockCount = m_Blocks.size();
    12234 
    12235  pDefragCtx->blockContexts.resize(blockCount);
    12236  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12237 
    12238  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12239  const size_t moveCount = moves.size();
    12240  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12241  {
    12242  const VmaDefragmentationMove& move = moves[moveIndex];
    12243  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12244  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12245  }
    12246 
    12247  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12248 
    12249  // Go over all blocks. Create and bind buffer for whole block if necessary.
    12250  {
    12251  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    12252  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
    12253  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    12254 
    12255  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12256  {
    12257  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12258  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12259  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12260  {
    12261  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12262  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12263  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12264  if(pDefragCtx->res == VK_SUCCESS)
    12265  {
    12266  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12267  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12268  }
    12269  }
    12270  }
    12271  }
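 // Each used block now has a temporary VkBuffer with TRANSFER_SRC|DST usage that
 // spans the whole VkDeviceMemory block and is bound at memory offset 0, so the
 // algorithm's move offsets can be used directly as buffer offsets in vkCmdCopyBuffer.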
    12272 
    12273  // Go over all moves. Post data transfer commands to command buffer.
    12274  if(pDefragCtx->res == VK_SUCCESS)
    12275  {
    12276  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12277  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12278 
    12279  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12280  {
    12281  const VmaDefragmentationMove& move = moves[moveIndex];
    12282 
    12283  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12284  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12285 
    12286  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12287 
    12288  VkBufferCopy region = {
    12289  move.srcOffset,
    12290  move.dstOffset,
    12291  move.size };
    12292  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12293  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12294  }
    12295  }
    12296 
    12297  // Keep buffers in the defrag context for later destruction; VK_NOT_READY signals that recorded copies still need to be submitted.
    12298  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12299  {
    12300  pDefragCtx->res = VK_NOT_READY;
    12301  }
    12302 }
    12303 
    12304 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12305 {
    12306  m_HasEmptyBlock = false;
    12307  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12308  {
    12309  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12310  if(pBlock->m_pMetadata->IsEmpty())
    12311  {
    12312  if(m_Blocks.size() > m_MinBlockCount)
    12313  {
    12314  if(pDefragmentationStats != VMA_NULL)
    12315  {
    12316  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12317  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12318  }
    12319 
    12320  VmaVectorRemove(m_Blocks, blockIndex);
    12321  pBlock->Destroy(m_hAllocator);
    12322  vma_delete(m_hAllocator, pBlock);
    12323  }
    12324  else
    12325  {
    12326  m_HasEmptyBlock = true;
    12327  }
    12328  }
    12329  }
    12330 }
    12331 
    12332 #if VMA_STATS_STRING_ENABLED
    12333 
    12334 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    12335 {
    12336  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12337 
    12338  json.BeginObject();
    12339 
    12340  if(m_IsCustomPool)
    12341  {
    12342  json.WriteString("MemoryTypeIndex");
    12343  json.WriteNumber(m_MemoryTypeIndex);
    12344 
    12345  json.WriteString("BlockSize");
    12346  json.WriteNumber(m_PreferredBlockSize);
    12347 
    12348  json.WriteString("BlockCount");
    12349  json.BeginObject(true);
    12350  if(m_MinBlockCount > 0)
    12351  {
    12352  json.WriteString("Min");
    12353  json.WriteNumber((uint64_t)m_MinBlockCount);
    12354  }
    12355  if(m_MaxBlockCount < SIZE_MAX)
    12356  {
    12357  json.WriteString("Max");
    12358  json.WriteNumber((uint64_t)m_MaxBlockCount);
    12359  }
    12360  json.WriteString("Cur");
    12361  json.WriteNumber((uint64_t)m_Blocks.size());
    12362  json.EndObject();
    12363 
    12364  if(m_FrameInUseCount > 0)
    12365  {
    12366  json.WriteString("FrameInUseCount");
    12367  json.WriteNumber(m_FrameInUseCount);
    12368  }
    12369 
    12370  if(m_Algorithm != 0)
    12371  {
    12372  json.WriteString("Algorithm");
    12373  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    12374  }
    12375  }
    12376  else
    12377  {
    12378  json.WriteString("PreferredBlockSize");
    12379  json.WriteNumber(m_PreferredBlockSize);
    12380  }
    12381 
    12382  json.WriteString("Blocks");
    12383  json.BeginObject();
    12384  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12385  {
    12386  json.BeginString();
    12387  json.ContinueString(m_Blocks[i]->GetId());
    12388  json.EndString();
    12389 
    12390  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    12391  }
    12392  json.EndObject();
    12393 
    12394  json.EndObject();
    12395 }
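// For a custom pool, the object written above looks roughly like this (illustrative
// values; the optional keys appear only under the conditions checked above):
//   {
//     "MemoryTypeIndex": 1,
//     "BlockSize": 268435456,
//     "BlockCount": { "Min": 1, "Cur": 2 },
//     "Blocks": {
//       "0": { <detailed map of block 0> },
//       "5": { <detailed map of block 5> }
//     }
//   }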
    12396 
    12397 #endif // #if VMA_STATS_STRING_ENABLED
    12398 
    12399 void VmaBlockVector::Defragment(
    12400  class VmaBlockVectorDefragmentationContext* pCtx,
    12401  VmaDefragmentationStats* pStats,
    12402  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12403  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12404  VkCommandBuffer commandBuffer)
    12405 {
    12406  pCtx->res = VK_SUCCESS;
    12407 
    12408  const VkMemoryPropertyFlags memPropFlags =
    12409  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12410  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12411  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    12412 
    12413  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12414  isHostVisible;
    12415  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12416  !IsCorruptionDetectionEnabled();
    12417 
    12418  // There are options to defragment this memory type.
    12419  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12420  {
    12421  bool defragmentOnGpu;
    12422  // There is only one option to defragment this memory type.
    12423  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12424  {
    12425  defragmentOnGpu = canDefragmentOnGpu;
    12426  }
    12427  // Both options are available: Heuristics to choose the best one.
    12428  else
    12429  {
    12430  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12431  m_hAllocator->IsIntegratedGpu();
    12432  }
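 // Likely rationale: DEVICE_LOCAL memory on discrete GPUs is unmappable or slow to
 // access from the CPU, and on integrated GPUs all memory is effectively
 // device-local, so GPU-side copies are the safer default whenever both are possible.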
    12433 
    12434  bool overlappingMoveSupported = !defragmentOnGpu;
    12435 
    12436  if(m_hAllocator->m_UseMutex)
    12437  {
    12438  m_Mutex.LockWrite();
    12439  pCtx->mutexLocked = true;
    12440  }
    12441 
    12442  pCtx->Begin(overlappingMoveSupported);
    12443 
    12444  // Defragment.
    12445 
    12446  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12447  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12448  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12449  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12450  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12451 
    12452  // Accumulate statistics.
    12453  if(pStats != VMA_NULL)
    12454  {
    12455  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12456  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12457  pStats->bytesMoved += bytesMoved;
    12458  pStats->allocationsMoved += allocationsMoved;
    12459  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12460  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12461  if(defragmentOnGpu)
    12462  {
    12463  maxGpuBytesToMove -= bytesMoved;
    12464  maxGpuAllocationsToMove -= allocationsMoved;
    12465  }
    12466  else
    12467  {
    12468  maxCpuBytesToMove -= bytesMoved;
    12469  maxCpuAllocationsToMove -= allocationsMoved;
    12470  }
    12471  }
    12472 
    12473  if(pCtx->res >= VK_SUCCESS)
    12474  {
    12475  if(defragmentOnGpu)
    12476  {
    12477  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12478  }
    12479  else
    12480  {
    12481  ApplyDefragmentationMovesCpu(pCtx, moves);
    12482  }
    12483  }
    12484  }
    12485 }
    12486 
    12487 void VmaBlockVector::DefragmentationEnd(
    12488  class VmaBlockVectorDefragmentationContext* pCtx,
    12489  VmaDefragmentationStats* pStats)
    12490 {
    12491  // Destroy buffers.
    12492  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12493  {
    12494  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12495  if(blockCtx.hBuffer)
    12496  {
    12497  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12498  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12499  }
    12500  }
    12501 
    12502  if(pCtx->res >= VK_SUCCESS)
    12503  {
    12504  FreeEmptyBlocks(pStats);
    12505  }
    12506 
    12507  if(pCtx->mutexLocked)
    12508  {
    12509  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12510  m_Mutex.UnlockWrite();
    12511  }
    12512 }
    12513 
    12514 size_t VmaBlockVector::CalcAllocationCount() const
    12515 {
    12516  size_t result = 0;
    12517  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12518  {
    12519  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12520  }
    12521  return result;
    12522 }
    12523 
    12524 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12525 {
    12526  if(m_BufferImageGranularity == 1)
    12527  {
    12528  return false;
    12529  }
    12530  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12531  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12532  {
    12533  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12534  VMA_ASSERT(m_Algorithm == 0);
    12535  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12536  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12537  {
    12538  return true;
    12539  }
    12540  }
    12541  return false;
    12542 }
    12543 
    12544 void VmaBlockVector::MakePoolAllocationsLost(
    12545  uint32_t currentFrameIndex,
    12546  size_t* pLostAllocationCount)
    12547 {
    12548  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12549  size_t lostAllocationCount = 0;
    12550  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12551  {
    12552  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12553  VMA_ASSERT(pBlock);
    12554  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12555  }
    12556  if(pLostAllocationCount != VMA_NULL)
    12557  {
    12558  *pLostAllocationCount = lostAllocationCount;
    12559  }
    12560 }
    12561 
    12562 VkResult VmaBlockVector::CheckCorruption()
    12563 {
    12564  if(!IsCorruptionDetectionEnabled())
    12565  {
    12566  return VK_ERROR_FEATURE_NOT_PRESENT;
    12567  }
    12568 
    12569  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12570  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12571  {
    12572  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12573  VMA_ASSERT(pBlock);
    12574  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12575  if(res != VK_SUCCESS)
    12576  {
    12577  return res;
    12578  }
    12579  }
    12580  return VK_SUCCESS;
    12581 }
    12582 
    12583 void VmaBlockVector::AddStats(VmaStats* pStats)
    12584 {
    12585  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12586  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12587 
    12588  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12589 
    12590  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12591  {
    12592  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12593  VMA_ASSERT(pBlock);
    12594  VMA_HEAVY_ASSERT(pBlock->Validate());
    12595  VmaStatInfo allocationStatInfo;
    12596  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12597  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12598  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12599  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12600  }
    12601 }
    12602 
    12603 ////////////////////////////////////////////////////////////////////////////////
    12604 // VmaDefragmentationAlgorithm_Generic members definition
    12605 
    12606 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12607  VmaAllocator hAllocator,
    12608  VmaBlockVector* pBlockVector,
    12609  uint32_t currentFrameIndex,
    12610  bool overlappingMoveSupported) :
    12611  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12612  m_AllAllocations(false),
    12613  m_AllocationCount(0),
    12614  m_BytesMoved(0),
    12615  m_AllocationsMoved(0),
    12616  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12617 {
    12618  // Create block info for each block.
    12619  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12620  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12621  {
    12622  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12623  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12624  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12625  m_Blocks.push_back(pBlockInfo);
    12626  }
    12627 
    12628  // Sort them by m_pBlock pointer value.
    12629  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12630 }
    12631 
    12632 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12633 {
    12634  for(size_t i = m_Blocks.size(); i--; )
    12635  {
    12636  vma_delete(m_hAllocator, m_Blocks[i]);
    12637  }
    12638 }
    12639 
    12640 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12641 {
    12642  // Now that we hold VmaBlockVector::m_Mutex, we can do a final check whether this allocation was lost.
    12643  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12644  {
    12645  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12646  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12647  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12648  {
    12649  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12650  (*it)->m_Allocations.push_back(allocInfo);
    12651  }
    12652  else
    12653  {
    12654  VMA_ASSERT(0);
    12655  }
    12656 
    12657  ++m_AllocationCount;
    12658  }
    12659 }
    12660 
    12661 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    12662  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12663  VkDeviceSize maxBytesToMove,
    12664  uint32_t maxAllocationsToMove)
    12665 {
    12666  if(m_Blocks.empty())
    12667  {
    12668  return VK_SUCCESS;
    12669  }
    12670 
    12671  // This is a choice based on research.
    12672  // Option 1:
    12673  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    12674  // Option 2:
    12675  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    12676  // Option 3:
    12677  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
    12678 
    12679  size_t srcBlockMinIndex = 0;
    12680  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
    12681  /*
    12682  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    12683  {
    12684  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
    12685  if(blocksWithNonMovableCount > 0)
    12686  {
    12687  srcBlockMinIndex = blocksWithNonMovableCount - 1;
    12688  }
    12689  }
    12690  */
    12691 
    12692  size_t srcBlockIndex = m_Blocks.size() - 1;
    12693  size_t srcAllocIndex = SIZE_MAX;
    12694  for(;;)
    12695  {
    12696  // 1. Find next allocation to move.
    12697  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
    12698  // 1.2. Then start from last to first m_Allocations.
    12699  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    12700  {
    12701  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    12702  {
    12703  // Finished: no more allocations to process.
    12704  if(srcBlockIndex == srcBlockMinIndex)
    12705  {
    12706  return VK_SUCCESS;
    12707  }
    12708  else
    12709  {
    12710  --srcBlockIndex;
    12711  srcAllocIndex = SIZE_MAX;
    12712  }
    12713  }
    12714  else
    12715  {
    12716  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    12717  }
    12718  }
    12719 
    12720  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    12721  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    12722 
    12723  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    12724  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    12725  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    12726  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    12727 
    12728  // 2. Try to find new place for this allocation in preceding or current block.
    12729  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    12730  {
    12731  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    12732  VmaAllocationRequest dstAllocRequest;
    12733  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    12734  m_CurrentFrameIndex,
    12735  m_pBlockVector->GetFrameInUseCount(),
    12736  m_pBlockVector->GetBufferImageGranularity(),
    12737  size,
    12738  alignment,
    12739  false, // upperAddress
    12740  suballocType,
    12741  false, // canMakeOtherLost
    12742  strategy,
    12743  &dstAllocRequest) &&
    12744  MoveMakesSense(
    12745  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    12746  {
    12747  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    12748 
    12749  // Reached limit on number of allocations or bytes to move.
    12750  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    12751  (m_BytesMoved + size > maxBytesToMove))
    12752  {
    12753  return VK_SUCCESS;
    12754  }
    12755 
    12756  VmaDefragmentationMove move;
    12757  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
    12758  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
    12759  move.srcOffset = srcOffset;
    12760  move.dstOffset = dstAllocRequest.offset;
    12761  move.size = size;
    12762  moves.push_back(move);
    12763 
    12764  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    12765  dstAllocRequest,
    12766  suballocType,
    12767  size,
    12768  allocInfo.m_hAllocation);
    12769  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    12770 
    12771  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    12772 
    12773  if(allocInfo.m_pChanged != VMA_NULL)
    12774  {
    12775  *allocInfo.m_pChanged = VK_TRUE;
    12776  }
    12777 
    12778  ++m_AllocationsMoved;
    12779  m_BytesMoved += size;
    12780 
    12781  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    12782 
    12783  break;
    12784  }
    12785  }
    12786 
    12787  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    12788 
    12789  if(srcAllocIndex > 0)
    12790  {
    12791  --srcAllocIndex;
    12792  }
    12793  else
    12794  {
    12795  if(srcBlockIndex > 0)
    12796  {
    12797  --srcBlockIndex;
    12798  srcAllocIndex = SIZE_MAX;
    12799  }
    12800  else
    12801  {
    12802  return VK_SUCCESS;
    12803  }
    12804  }
    12805  }
    12806 }
    12807 
    12808 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12809 {
    12810  size_t result = 0;
    12811  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12812  {
    12813  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12814  {
    12815  ++result;
    12816  }
    12817  }
    12818  return result;
    12819 }
    12820 
    12821 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12822  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12823  VkDeviceSize maxBytesToMove,
    12824  uint32_t maxAllocationsToMove)
    12825 {
    12826  if(!m_AllAllocations && m_AllocationCount == 0)
    12827  {
    12828  return VK_SUCCESS;
    12829  }
    12830 
    12831  const size_t blockCount = m_Blocks.size();
    12832  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12833  {
    12834  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12835 
    12836  if(m_AllAllocations)
    12837  {
    12838  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12839  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12840  it != pMetadata->m_Suballocations.end();
    12841  ++it)
    12842  {
    12843  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12844  {
    12845  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12846  pBlockInfo->m_Allocations.push_back(allocInfo);
    12847  }
    12848  }
    12849  }
    12850 
    12851  pBlockInfo->CalcHasNonMovableAllocations();
    12852 
    12853  // This is a choice based on research.
    12854  // Option 1:
    12855  pBlockInfo->SortAllocationsByOffsetDescending();
    12856  // Option 2:
    12857  //pBlockInfo->SortAllocationsBySizeDescending();
    12858  }
    12859 
    12860  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    12861  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12862 
    12863  // This is a choice based on research.
    12864  const uint32_t roundCount = 2;
    12865 
    12866  // Execute defragmentation rounds (the main part).
    12867  VkResult result = VK_SUCCESS;
    12868  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12869  {
    12870  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12871  }
    12872 
    12873  return result;
    12874 }
    12875 
    12876 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12877  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12878  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12879 {
    12880  if(dstBlockIndex < srcBlockIndex)
    12881  {
    12882  return true;
    12883  }
    12884  if(dstBlockIndex > srcBlockIndex)
    12885  {
    12886  return false;
    12887  }
    12888  if(dstOffset < srcOffset)
    12889  {
    12890  return true;
    12891  }
    12892  return false;
    12893 }
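// Equivalent to the lexicographic comparison
// (dstBlockIndex, dstOffset) < (srcBlockIndex, srcOffset):
// an allocation only ever moves to an earlier block, or backward within its own block.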
    12894 
    12895 ////////////////////////////////////////////////////////////////////////////////
    12896 // VmaDefragmentationAlgorithm_Fast
    12897 
    12898 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    12899  VmaAllocator hAllocator,
    12900  VmaBlockVector* pBlockVector,
    12901  uint32_t currentFrameIndex,
    12902  bool overlappingMoveSupported) :
    12903  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12904  m_OverlappingMoveSupported(overlappingMoveSupported),
    12905  m_AllocationCount(0),
    12906  m_AllAllocations(false),
    12907  m_BytesMoved(0),
    12908  m_AllocationsMoved(0),
    12909  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
    12910 {
    12911  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
    12912 
    12913 }
    12914 
    12915 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12916 {
    12917 }
    12918 
    12919 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12920  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12921  VkDeviceSize maxBytesToMove,
    12922  uint32_t maxAllocationsToMove)
    12923 {
    12924  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12925 
    12926  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12927  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12928  {
    12929  return VK_SUCCESS;
    12930  }
    12931 
    12932  PreprocessMetadata();
    12933 
    12934  // Sort blocks in order from most "destination" to most "source".
    12935 
    12936  m_BlockInfos.resize(blockCount);
    12937  for(size_t i = 0; i < blockCount; ++i)
    12938  {
    12939  m_BlockInfos[i].origBlockIndex = i;
    12940  }
    12941 
    12942  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    12943  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    12944  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    12945  });
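 // Blocks with the least free space come first and act as destinations. The single
 // pass below packs each suballocation at the lowest offset that fits; any gap it
 // has to skip is registered in the FreeSpaceDatabase so a later, smaller
 // allocation can still be placed there.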
    12946 
    12947  // THE MAIN ALGORITHM
    12948 
    12949  FreeSpaceDatabase freeSpaceDb;
    12950 
    12951  size_t dstBlockInfoIndex = 0;
    12952  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12953  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12954  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12955  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    12956  VkDeviceSize dstOffset = 0;
    12957 
    12958  bool end = false;
    12959  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    12960  {
    12961  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    12962  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    12963  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    12964  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    12965  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    12966  {
    12967  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    12968  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    12969  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    12970  if(m_AllocationsMoved == maxAllocationsToMove ||
    12971  m_BytesMoved + srcAllocSize > maxBytesToMove)
    12972  {
    12973  end = true;
    12974  break;
    12975  }
    12976  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    12977 
    12978  // Try to place it in one of the free spaces from the database.
    12979  size_t freeSpaceInfoIndex;
    12980  VkDeviceSize dstAllocOffset;
    12981  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    12982  freeSpaceInfoIndex, dstAllocOffset))
    12983  {
    12984  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    12985  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    12986  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    12987 
    12988  // Same block
    12989  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    12990  {
    12991  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    12992 
    12993  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    12994 
    12995  VmaSuballocation suballoc = *srcSuballocIt;
    12996  suballoc.offset = dstAllocOffset;
    12997  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    12998  m_BytesMoved += srcAllocSize;
    12999  ++m_AllocationsMoved;
    13000 
    13001  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13002  ++nextSuballocIt;
    13003  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13004  srcSuballocIt = nextSuballocIt;
    13005 
    13006  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13007 
    13008  VmaDefragmentationMove move = {
    13009  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13010  srcAllocOffset, dstAllocOffset,
    13011  srcAllocSize };
    13012  moves.push_back(move);
    13013  }
    13014  // Different block
    13015  else
    13016  {
    13017  // MOVE OPTION 2: Move the allocation to a different block.
    13018 
    13019  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    13020 
    13021  VmaSuballocation suballoc = *srcSuballocIt;
    13022  suballoc.offset = dstAllocOffset;
    13023  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    13024  m_BytesMoved += srcAllocSize;
    13025  ++m_AllocationsMoved;
    13026 
    13027  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13028  ++nextSuballocIt;
    13029  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13030  srcSuballocIt = nextSuballocIt;
    13031 
    13032  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13033 
    13034  VmaDefragmentationMove move = {
    13035  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13036  srcAllocOffset, dstAllocOffset,
    13037  srcAllocSize };
    13038  moves.push_back(move);
    13039  }
    13040  }
    13041  else
    13042  {
    13043  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    13044 
    13045  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
    13046  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    13047  dstAllocOffset + srcAllocSize > dstBlockSize)
    13048  {
    13049  // But before that, register remaining free space at the end of dst block.
    13050  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    13051 
    13052  ++dstBlockInfoIndex;
    13053  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13054  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13055  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13056  dstBlockSize = pDstMetadata->GetSize();
    13057  dstOffset = 0;
    13058  dstAllocOffset = 0;
    13059  }
    13060 
    13061  // Same block
    13062  if(dstBlockInfoIndex == srcBlockInfoIndex)
    13063  {
    13064  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13065 
    13066  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    13067 
    13068  bool skipOver = overlap;
    13069  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    13070  {
    13071  // If the destination and source ranges overlap, skip the move if it would
    13072  // shift the allocation by less than 1/64 of its size.
    13073  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    13074  }
    13075 
    13076  if(skipOver)
    13077  {
    13078  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    13079 
    13080  dstOffset = srcAllocOffset + srcAllocSize;
    13081  ++srcSuballocIt;
    13082  }
    13083  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13084  else
    13085  {
    13086  srcSuballocIt->offset = dstAllocOffset;
    13087  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    13088  dstOffset = dstAllocOffset + srcAllocSize;
    13089  m_BytesMoved += srcAllocSize;
    13090  ++m_AllocationsMoved;
    13091  ++srcSuballocIt;
    13092  VmaDefragmentationMove move = {
    13093  srcOrigBlockIndex, dstOrigBlockIndex,
    13094  srcAllocOffset, dstAllocOffset,
    13095  srcAllocSize };
    13096  moves.push_back(move);
    13097  }
    13098  }
    13099  // Different block
    13100  else
    13101  {
    13102  // MOVE OPTION 2: Move the allocation to a different block.
    13103 
    13104  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    13105  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    13106 
    13107  VmaSuballocation suballoc = *srcSuballocIt;
    13108  suballoc.offset = dstAllocOffset;
    13109  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    13110  dstOffset = dstAllocOffset + srcAllocSize;
    13111  m_BytesMoved += srcAllocSize;
    13112  ++m_AllocationsMoved;
    13113 
    13114  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13115  ++nextSuballocIt;
    13116  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13117  srcSuballocIt = nextSuballocIt;
    13118 
    13119  pDstMetadata->m_Suballocations.push_back(suballoc);
    13120 
    13121  VmaDefragmentationMove move = {
    13122  srcOrigBlockIndex, dstOrigBlockIndex,
    13123  srcAllocOffset, dstAllocOffset,
    13124  srcAllocSize };
    13125  moves.push_back(move);
    13126  }
    13127  }
    13128  }
    13129  }
    13130 
    13131  m_BlockInfos.clear();
    13132 
    13133  PostprocessMetadata();
    13134 
    13135  return VK_SUCCESS;
    13136 }
    13137 
    13138 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13139 {
    13140  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13141  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13142  {
    13143  VmaBlockMetadata_Generic* const pMetadata =
    13144  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13145  pMetadata->m_FreeCount = 0;
    13146  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13147  pMetadata->m_FreeSuballocationsBySize.clear();
    13148  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13149  it != pMetadata->m_Suballocations.end(); )
    13150  {
    13151  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13152  {
    13153  VmaSuballocationList::iterator nextIt = it;
    13154  ++nextIt;
    13155  pMetadata->m_Suballocations.erase(it);
    13156  it = nextIt;
    13157  }
    13158  else
    13159  {
    13160  ++it;
    13161  }
    13162  }
    13163  }
    13164 }
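// From here until PostprocessMetadata(), m_Suballocations contains only real
// allocations; Defragment() edits this compacted list in place, and
// PostprocessMetadata() then re-inserts VMA_SUBALLOCATION_TYPE_FREE entries and
// rebuilds m_FreeSuballocationsBySize.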
    13165 
    13166 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13167 {
    13168  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13169  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13170  {
    13171  VmaBlockMetadata_Generic* const pMetadata =
    13172  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13173  const VkDeviceSize blockSize = pMetadata->GetSize();
    13174 
    13175  // No allocations in this block - entire area is free.
    13176  if(pMetadata->m_Suballocations.empty())
    13177  {
    13178  pMetadata->m_FreeCount = 1;
    13179  //pMetadata->m_SumFreeSize is already set to blockSize.
    13180  VmaSuballocation suballoc = {
    13181  0, // offset
    13182  blockSize, // size
    13183  VMA_NULL, // hAllocation
    13184  VMA_SUBALLOCATION_TYPE_FREE };
    13185  pMetadata->m_Suballocations.push_back(suballoc);
    13186  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13187  }
    13188  // There are some allocations in this block.
    13189  else
    13190  {
    13191  VkDeviceSize offset = 0;
    13192  VmaSuballocationList::iterator it;
    13193  for(it = pMetadata->m_Suballocations.begin();
    13194  it != pMetadata->m_Suballocations.end();
    13195  ++it)
    13196  {
    13197  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13198  VMA_ASSERT(it->offset >= offset);
    13199 
    13200  // Need to insert preceding free space.
    13201  if(it->offset > offset)
    13202  {
    13203  ++pMetadata->m_FreeCount;
    13204  const VkDeviceSize freeSize = it->offset - offset;
    13205  VmaSuballocation suballoc = {
    13206  offset, // offset
    13207  freeSize, // size
    13208  VMA_NULL, // hAllocation
    13209  VMA_SUBALLOCATION_TYPE_FREE };
    13210  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13211  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13212  {
    13213  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13214  }
    13215  }
    13216 
    13217  pMetadata->m_SumFreeSize -= it->size;
    13218  offset = it->offset + it->size;
    13219  }
    13220 
    13221  // Need to insert trailing free space.
    13222  if(offset < blockSize)
    13223  {
    13224  ++pMetadata->m_FreeCount;
    13225  const VkDeviceSize freeSize = blockSize - offset;
    13226  VmaSuballocation suballoc = {
    13227  offset, // offset
    13228  freeSize, // size
    13229  VMA_NULL, // hAllocation
    13230  VMA_SUBALLOCATION_TYPE_FREE };
    13231  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13232  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13233  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13234  {
    13235  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13236  }
    13237  }
    13238 
    13239  VMA_SORT(
    13240  pMetadata->m_FreeSuballocationsBySize.begin(),
    13241  pMetadata->m_FreeSuballocationsBySize.end(),
    13242  VmaSuballocationItemSizeLess());
    13243  }
    13244 
    13245  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13246  }
    13247 }
    13248 
    13249 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13250 {
    13251  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13252  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13253  // Advance to the first suballocation placed at an offset >= suballoc.offset.
    13254  while(it != pMetadata->m_Suballocations.end() &&
    13255  it->offset < suballoc.offset)
    13256  {
    13257  ++it;
    13258  }
    13260  pMetadata->m_Suballocations.insert(it, suballoc);
    13261 }
    13262 
    13263 ////////////////////////////////////////////////////////////////////////////////
    13264 // VmaBlockVectorDefragmentationContext
    13265 
    13266 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    13267  VmaAllocator hAllocator,
    13268  VmaPool hCustomPool,
    13269  VmaBlockVector* pBlockVector,
    13270  uint32_t currFrameIndex,
    13271  uint32_t algorithmFlags) :
    13272  res(VK_SUCCESS),
    13273  mutexLocked(false),
    13274  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    13275  m_hAllocator(hAllocator),
    13276  m_hCustomPool(hCustomPool),
    13277  m_pBlockVector(pBlockVector),
    13278  m_CurrFrameIndex(currFrameIndex),
    13279  m_AlgorithmFlags(algorithmFlags),
    13280  m_pAlgorithm(VMA_NULL),
    13281  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    13282  m_AllAllocations(false)
    13283 {
    13284 }
    13285 
    13286 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
    13287 {
    13288  vma_delete(m_hAllocator, m_pAlgorithm);
    13289 }
    13290 
    13291 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13292 {
    13293  AllocInfo info = { hAlloc, pChanged };
    13294  m_Allocations.push_back(info);
    13295 }
    13296 
    13297 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13298 {
    13299  const bool allAllocations = m_AllAllocations ||
    13300  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13301 
    13302  /********************************
    13303  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13304  ********************************/
    13305 
    13306  /*
    13307  Fast algorithm is supported only when certain criteria are met:
    13308  - VMA_DEBUG_MARGIN is 0.
    13309  - All allocations in this block vector are moveable.
    13310  - There is no possibility of image/buffer granularity conflict.
    13311  */
    13312  if(VMA_DEBUG_MARGIN == 0 &&
    13313  allAllocations &&
    13314  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13315  {
    13316  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13317  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13318  }
    13319  else
    13320  {
    13321  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13322  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13323  }
    13324 
    13325  if(allAllocations)
    13326  {
    13327  m_pAlgorithm->AddAll();
    13328  }
    13329  else
    13330  {
    13331  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13332  {
    13333  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13334  }
    13335  }
    13336 }
    13337 
    13339 ////////////////////////////////////////////////////////////////////////////////
    13340 // VmaDefragmentationContext
    13340 
    13341 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    13342  VmaAllocator hAllocator,
    13343  uint32_t currFrameIndex,
    13344  uint32_t flags,
    13345  VmaDefragmentationStats* pStats) :
    13346  m_hAllocator(hAllocator),
    13347  m_CurrFrameIndex(currFrameIndex),
    13348  m_Flags(flags),
    13349  m_pStats(pStats),
    13350  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
    13351 {
    13352  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
    13353 }
    13354 
    13355 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13356 {
    13357  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13358  {
    13359  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13360  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13361  vma_delete(m_hAllocator, pBlockVectorCtx);
    13362  }
    13363  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13364  {
    13365  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13366  if(pBlockVectorCtx)
    13367  {
    13368  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13369  vma_delete(m_hAllocator, pBlockVectorCtx);
    13370  }
    13371  }
    13372 }
    13373 
    13374 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13375 {
    13376  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13377  {
    13378  VmaPool pool = pPools[poolIndex];
    13379  VMA_ASSERT(pool);
    13380  // Pools with algorithm other than default are not defragmented.
    13381  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13382  {
    13383  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13384 
    13385  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13386  {
    13387  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13388  {
    13389  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13390  break;
    13391  }
    13392  }
    13393 
    13394  if(!pBlockVectorDefragCtx)
    13395  {
    13396  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13397  m_hAllocator,
    13398  pool,
    13399  &pool->m_BlockVector,
    13400  m_CurrFrameIndex,
    13401  m_Flags);
    13402  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13403  }
    13404 
    13405  pBlockVectorDefragCtx->AddAll();
    13406  }
    13407  }
    13408 }
    13409 
    13410 void VmaDefragmentationContext_T::AddAllocations(
    13411  uint32_t allocationCount,
    13412  VmaAllocation* pAllocations,
    13413  VkBool32* pAllocationsChanged)
    13414 {
    13415  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
    13416  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13417  {
    13418  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13419  VMA_ASSERT(hAlloc);
    13420  // DedicatedAlloc cannot be defragmented.
    13421  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13422  // Lost allocation cannot be defragmented.
    13423  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13424  {
    13425  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13426 
    13427  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
    13428  // This allocation belongs to a custom pool.
    13429  if(hAllocPool != VK_NULL_HANDLE)
    13430  {
    13431  // Pools with algorithm other than default are not defragmented.
    13432  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13433  {
    13434  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13435  {
    13436  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13437  {
    13438  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13439  break;
    13440  }
    13441  }
    13442  if(!pBlockVectorDefragCtx)
    13443  {
    13444  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13445  m_hAllocator,
    13446  hAllocPool,
    13447  &hAllocPool->m_BlockVector,
    13448  m_CurrFrameIndex,
    13449  m_Flags);
    13450  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13451  }
    13452  }
    13453  }
    13454  // This allocation belongs to the default pool.
    13455  else
    13456  {
    13457  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13458  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13459  if(!pBlockVectorDefragCtx)
    13460  {
    13461  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13462  m_hAllocator,
    13463  VMA_NULL, // hCustomPool
    13464  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13465  m_CurrFrameIndex,
    13466  m_Flags);
    13467  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13468  }
    13469  }
    13470 
    13471  if(pBlockVectorDefragCtx)
    13472  {
    13473  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13474  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13475  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13476  }
    13477  }
    13478  }
    13479 }
    13480 
    13481 VkResult VmaDefragmentationContext_T::Defragment(
    13482  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13483  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13484  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13485 {
    13486  if(pStats)
    13487  {
    13488  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13489  }
    13490 
    13491  if(commandBuffer == VK_NULL_HANDLE)
    13492  {
    13493  maxGpuBytesToMove = 0;
    13494  maxGpuAllocationsToMove = 0;
    13495  }
    13496 
    13497  VkResult res = VK_SUCCESS;
    13498 
    13499  // Process default pools.
    13500  for(uint32_t memTypeIndex = 0;
    13501  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13502  ++memTypeIndex)
    13503  {
    13504  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13505  if(pBlockVectorCtx)
    13506  {
    13507  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13508  pBlockVectorCtx->GetBlockVector()->Defragment(
    13509  pBlockVectorCtx,
    13510  pStats,
    13511  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13512  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13513  commandBuffer);
    13514  if(pBlockVectorCtx->res != VK_SUCCESS)
    13515  {
    13516  res = pBlockVectorCtx->res;
    13517  }
    13518  }
    13519  }
    13520 
    13521  // Process custom pools.
    13522  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13523  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13524  ++customCtxIndex)
    13525  {
    13526  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13527  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13528  pBlockVectorCtx->GetBlockVector()->Defragment(
    13529  pBlockVectorCtx,
    13530  pStats,
    13531  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13532  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13533  commandBuffer);
    13534  if(pBlockVectorCtx->res != VK_SUCCESS)
    13535  {
    13536  res = pBlockVectorCtx->res;
    13537  }
    13538  }
    13539 
    13540  return res;
    13541 }
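// A minimal sketch (not part of vk_mem_alloc.h) of driving the machinery above
// through the public API; `allocator`, `allocations`, and `cmdBuf` are placeholders.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocations.size();
defragInfo.pAllocations = allocations.data();
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxGpuAllocationsToMove = UINT32_MAX;
defragInfo.commandBuffer = cmdBuf; // Pass VK_NULL_HANDLE to allow only the CPU path.

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
// If GPU copies were recorded (res == VK_NOT_READY), submit cmdBuf and wait here.
vmaDefragmentationEnd(allocator, defragCtx);
// Buffers/images bound to moved allocations must be recreated and rebound afterwards.
*/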
    13542 
    13543 ////////////////////////////////////////////////////////////////////////////////
    13544 // VmaRecorder
    13545 
    13546 #if VMA_RECORDING_ENABLED
    13547 
    13548 VmaRecorder::VmaRecorder() :
    13549  m_UseMutex(true),
    13550  m_Flags(0),
    13551  m_File(VMA_NULL),
    13552  m_Freq(INT64_MAX),
    13553  m_StartCounter(INT64_MAX)
    13554 {
    13555 }
    13556 
    13557 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13558 {
    13559  m_UseMutex = useMutex;
    13560  m_Flags = settings.flags;
    13561 
    13562  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13563  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13564 
    13565  // Open file for writing.
    13566  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13567  if(err != 0)
    13568  {
    13569  return VK_ERROR_INITIALIZATION_FAILED;
    13570  }
    13571 
    13572  // Write header.
    13573  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13574  fprintf(m_File, "%s\n", "1,5");
    13575 
    13576  return VK_SUCCESS;
    13577 }
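// The recording is a CSV-like text file: after the two header lines written above,
// each call becomes one line of the form threadId,time,frameIndex,functionName,params...
// For example (illustrative values):
//   18296,0.002,0,vmaCreateAllocator
//   18296,0.155,0,vmaCreatePool,1,0,268435456,1,16,0,000001D85A8C32A0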
    13578 
    13579 VmaRecorder::~VmaRecorder()
    13580 {
    13581  if(m_File != VMA_NULL)
    13582  {
    13583  fclose(m_File);
    13584  }
    13585 }
    13586 
    13587 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13588 {
    13589  CallParams callParams;
    13590  GetBasicParams(callParams);
    13591 
    13592  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13593  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13594  Flush();
    13595 }
    13596 
    13597 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13598 {
    13599  CallParams callParams;
    13600  GetBasicParams(callParams);
    13601 
    13602  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13603  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13604  Flush();
    13605 }
    13606 
    13607 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13608 {
    13609  CallParams callParams;
    13610  GetBasicParams(callParams);
    13611 
    13612  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13613  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13614  createInfo.memoryTypeIndex,
    13615  createInfo.flags,
    13616  createInfo.blockSize,
    13617  (uint64_t)createInfo.minBlockCount,
    13618  (uint64_t)createInfo.maxBlockCount,
    13619  createInfo.frameInUseCount,
    13620  pool);
    13621  Flush();
    13622 }
    13623 
    13624 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13625 {
    13626  CallParams callParams;
    13627  GetBasicParams(callParams);
    13628 
    13629  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13630  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13631  pool);
    13632  Flush();
    13633 }
    13634 
    13635 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13636  const VkMemoryRequirements& vkMemReq,
    13637  const VmaAllocationCreateInfo& createInfo,
    13638  VmaAllocation allocation)
    13639 {
    13640  CallParams callParams;
    13641  GetBasicParams(callParams);
    13642 
    13643  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13644  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13645  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13646  vkMemReq.size,
    13647  vkMemReq.alignment,
    13648  vkMemReq.memoryTypeBits,
    13649  createInfo.flags,
    13650  createInfo.usage,
    13651  createInfo.requiredFlags,
    13652  createInfo.preferredFlags,
    13653  createInfo.memoryTypeBits,
    13654  createInfo.pool,
    13655  allocation,
    13656  userDataStr.GetString());
    13657  Flush();
    13658 }
    13659 
    13660 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13661  const VkMemoryRequirements& vkMemReq,
    13662  const VmaAllocationCreateInfo& createInfo,
    13663  uint64_t allocationCount,
    13664  const VmaAllocation* pAllocations)
    13665 {
    13666  CallParams callParams;
    13667  GetBasicParams(callParams);
    13668 
    13669  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13670  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13671  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13672  vkMemReq.size,
    13673  vkMemReq.alignment,
    13674  vkMemReq.memoryTypeBits,
    13675  createInfo.flags,
    13676  createInfo.usage,
    13677  createInfo.requiredFlags,
    13678  createInfo.preferredFlags,
    13679  createInfo.memoryTypeBits,
    13680  createInfo.pool);
    13681  PrintPointerList(allocationCount, pAllocations);
    13682  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13683  Flush();
    13684 }
    13685 
    13686 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13687  const VkMemoryRequirements& vkMemReq,
    13688  bool requiresDedicatedAllocation,
    13689  bool prefersDedicatedAllocation,
    13690  const VmaAllocationCreateInfo& createInfo,
    13691  VmaAllocation allocation)
    13692 {
    13693  CallParams callParams;
    13694  GetBasicParams(callParams);
    13695 
    13696  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13697  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13698  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13699  vkMemReq.size,
    13700  vkMemReq.alignment,
    13701  vkMemReq.memoryTypeBits,
    13702  requiresDedicatedAllocation ? 1 : 0,
    13703  prefersDedicatedAllocation ? 1 : 0,
    13704  createInfo.flags,
    13705  createInfo.usage,
    13706  createInfo.requiredFlags,
    13707  createInfo.preferredFlags,
    13708  createInfo.memoryTypeBits,
    13709  createInfo.pool,
    13710  allocation,
    13711  userDataStr.GetString());
    13712  Flush();
    13713 }
    13714 
    13715 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13716  const VkMemoryRequirements& vkMemReq,
    13717  bool requiresDedicatedAllocation,
    13718  bool prefersDedicatedAllocation,
    13719  const VmaAllocationCreateInfo& createInfo,
    13720  VmaAllocation allocation)
    13721 {
    13722  CallParams callParams;
    13723  GetBasicParams(callParams);
    13724 
    13725  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13726  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13727  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13728  vkMemReq.size,
    13729  vkMemReq.alignment,
    13730  vkMemReq.memoryTypeBits,
    13731  requiresDedicatedAllocation ? 1 : 0,
    13732  prefersDedicatedAllocation ? 1 : 0,
    13733  createInfo.flags,
    13734  createInfo.usage,
    13735  createInfo.requiredFlags,
    13736  createInfo.preferredFlags,
    13737  createInfo.memoryTypeBits,
    13738  createInfo.pool,
    13739  allocation,
    13740  userDataStr.GetString());
    13741  Flush();
    13742 }
    13743 
    13744 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13745  VmaAllocation allocation)
    13746 {
    13747  CallParams callParams;
    13748  GetBasicParams(callParams);
    13749 
    13750  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13751  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13752  allocation);
    13753  Flush();
    13754 }
    13755 
    13756 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13757  uint64_t allocationCount,
    13758  const VmaAllocation* pAllocations)
    13759 {
    13760  CallParams callParams;
    13761  GetBasicParams(callParams);
    13762 
    13763  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13764  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13765  PrintPointerList(allocationCount, pAllocations);
    13766  fprintf(m_File, "\n");
    13767  Flush();
    13768 }
    13769 
    13770 void VmaRecorder::RecordResizeAllocation(
    13771  uint32_t frameIndex,
    13772  VmaAllocation allocation,
    13773  VkDeviceSize newSize)
    13774 {
    13775  CallParams callParams;
    13776  GetBasicParams(callParams);
    13777 
    13778  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13779  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13780  allocation, newSize);
    13781  Flush();
    13782 }
    13783 
    13784 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13785  VmaAllocation allocation,
    13786  const void* pUserData)
    13787 {
    13788  CallParams callParams;
    13789  GetBasicParams(callParams);
    13790 
    13791  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13792  UserDataString userDataStr(
    13793  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13794  pUserData);
    13795  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13796  allocation,
    13797  userDataStr.GetString());
    13798  Flush();
    13799 }
    13800 
    13801 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13802  VmaAllocation allocation)
    13803 {
    13804  CallParams callParams;
    13805  GetBasicParams(callParams);
    13806 
    13807  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13808  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13809  allocation);
    13810  Flush();
    13811 }
    13812 
    13813 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13814  VmaAllocation allocation)
    13815 {
    13816  CallParams callParams;
    13817  GetBasicParams(callParams);
    13818 
    13819  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13820  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13821  allocation);
    13822  Flush();
    13823 }
    13824 
    13825 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13826  VmaAllocation allocation)
    13827 {
    13828  CallParams callParams;
    13829  GetBasicParams(callParams);
    13830 
    13831  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13832  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13833  allocation);
    13834  Flush();
    13835 }
    13836 
    13837 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13838  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13839 {
    13840  CallParams callParams;
    13841  GetBasicParams(callParams);
    13842 
    13843  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13844  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13845  allocation,
    13846  offset,
    13847  size);
    13848  Flush();
    13849 }
    13850 
    13851 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13852  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13853 {
    13854  CallParams callParams;
    13855  GetBasicParams(callParams);
    13856 
    13857  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13858  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13859  allocation,
    13860  offset,
    13861  size);
    13862  Flush();
    13863 }
    13864 
    13865 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13866  const VkBufferCreateInfo& bufCreateInfo,
    13867  const VmaAllocationCreateInfo& allocCreateInfo,
    13868  VmaAllocation allocation)
    13869 {
    13870  CallParams callParams;
    13871  GetBasicParams(callParams);
    13872 
    13873  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13874  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13875  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13876  bufCreateInfo.flags,
    13877  bufCreateInfo.size,
    13878  bufCreateInfo.usage,
    13879  bufCreateInfo.sharingMode,
    13880  allocCreateInfo.flags,
    13881  allocCreateInfo.usage,
    13882  allocCreateInfo.requiredFlags,
    13883  allocCreateInfo.preferredFlags,
    13884  allocCreateInfo.memoryTypeBits,
    13885  allocCreateInfo.pool,
    13886  allocation,
    13887  userDataStr.GetString());
    13888  Flush();
    13889 }
    13890 
    13891 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13892  const VkImageCreateInfo& imageCreateInfo,
    13893  const VmaAllocationCreateInfo& allocCreateInfo,
    13894  VmaAllocation allocation)
    13895 {
    13896  CallParams callParams;
    13897  GetBasicParams(callParams);
    13898 
    13899  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13900  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13901  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13902  imageCreateInfo.flags,
    13903  imageCreateInfo.imageType,
    13904  imageCreateInfo.format,
    13905  imageCreateInfo.extent.width,
    13906  imageCreateInfo.extent.height,
    13907  imageCreateInfo.extent.depth,
    13908  imageCreateInfo.mipLevels,
    13909  imageCreateInfo.arrayLayers,
    13910  imageCreateInfo.samples,
    13911  imageCreateInfo.tiling,
    13912  imageCreateInfo.usage,
    13913  imageCreateInfo.sharingMode,
    13914  imageCreateInfo.initialLayout,
    13915  allocCreateInfo.flags,
    13916  allocCreateInfo.usage,
    13917  allocCreateInfo.requiredFlags,
    13918  allocCreateInfo.preferredFlags,
    13919  allocCreateInfo.memoryTypeBits,
    13920  allocCreateInfo.pool,
    13921  allocation,
    13922  userDataStr.GetString());
    13923  Flush();
    13924 }
    13925 
    13926 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13927  VmaAllocation allocation)
    13928 {
    13929  CallParams callParams;
    13930  GetBasicParams(callParams);
    13931 
    13932  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13933  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13934  allocation);
    13935  Flush();
    13936 }
    13937 
    13938 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13939  VmaAllocation allocation)
    13940 {
    13941  CallParams callParams;
    13942  GetBasicParams(callParams);
    13943 
    13944  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13945  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13946  allocation);
    13947  Flush();
    13948 }
    13949 
    13950 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13951  VmaAllocation allocation)
    13952 {
    13953  CallParams callParams;
    13954  GetBasicParams(callParams);
    13955 
    13956  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13957  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13958  allocation);
    13959  Flush();
    13960 }
    13961 
    13962 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    13963  VmaAllocation allocation)
    13964 {
    13965  CallParams callParams;
    13966  GetBasicParams(callParams);
    13967 
    13968  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13969  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    13970  allocation);
    13971  Flush();
    13972 }
    13973 
    13974 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    13975  VmaPool pool)
    13976 {
    13977  CallParams callParams;
    13978  GetBasicParams(callParams);
    13979 
    13980  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13981  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    13982  pool);
    13983  Flush();
    13984 }
    13985 
    13986 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    13987  const VmaDefragmentationInfo2& info,
 13988  VmaDefragmentationContext ctx)
 13989 {
    13990  CallParams callParams;
    13991  GetBasicParams(callParams);
    13992 
    13993  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13994  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    13995  info.flags);
    13996  PrintPointerList(info.allocationCount, info.pAllocations);
    13997  fprintf(m_File, ",");
    13998  PrintPointerList(info.poolCount, info.pPools);
    13999  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
 14000  info.maxCpuBytesToMove,
 14001  info.maxCpuAllocationsToMove,
 14002  info.maxGpuBytesToMove,
 14003  info.maxGpuAllocationsToMove,
 14004  info.commandBuffer,
    14005  ctx);
    14006  Flush();
    14007 }
    14008 
    14009 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
 14010  VmaDefragmentationContext ctx)
 14011 {
    14012  CallParams callParams;
    14013  GetBasicParams(callParams);
    14014 
    14015  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14016  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    14017  ctx);
    14018  Flush();
    14019 }
    14020 
    14021 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    14022 {
    14023  if(pUserData != VMA_NULL)
    14024  {
    14025  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    14026  {
    14027  m_Str = (const char*)pUserData;
    14028  }
    14029  else
    14030  {
    14031  sprintf_s(m_PtrStr, "%p", pUserData);
    14032  m_Str = m_PtrStr;
    14033  }
    14034  }
    14035  else
    14036  {
    14037  m_Str = "";
    14038  }
    14039 }
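
// Example (editorial): the last CSV column produced by UserDataString. With
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, pUserData is treated as a
// null-terminated string and written verbatim (e.g. "Player texture");
// otherwise only the pointer value is formatted (e.g. "000001C2A4F0B2E0").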
    14040 
    14041 void VmaRecorder::WriteConfiguration(
    14042  const VkPhysicalDeviceProperties& devProps,
    14043  const VkPhysicalDeviceMemoryProperties& memProps,
    14044  bool dedicatedAllocationExtensionEnabled)
    14045 {
    14046  fprintf(m_File, "Config,Begin\n");
    14047 
    14048  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    14049  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    14050  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    14051  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    14052  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    14053  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    14054 
    14055  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    14056  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    14057  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    14058 
    14059  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    14060  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    14061  {
    14062  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    14063  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    14064  }
    14065  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    14066  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    14067  {
    14068  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    14069  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    14070  }
    14071 
    14072  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    14073 
    14074  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    14075  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    14076  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    14077  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    14078  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    14079  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    14080  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    14081  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    14082  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14083 
    14084  fprintf(m_File, "Config,End\n");
    14085 }
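
// Example (editorial, values invented): the shape of a recorded
// configuration block as written by WriteConfiguration:
//
//   Config,Begin
//   PhysicalDevice,apiVersion,4198400
//   PhysicalDevice,vendorID,4098
//   PhysicalDeviceLimits,nonCoherentAtomSize,64
//   PhysicalDeviceMemory,HeapCount,2
//   Config,End
//
// (heap, memory-type, extension and macro lines omitted between HeapCount
// and Config,End).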
    14086 
    14087 void VmaRecorder::GetBasicParams(CallParams& outParams)
    14088 {
    14089  outParams.threadId = GetCurrentThreadId();
    14090 
    14091  LARGE_INTEGER counter;
    14092  QueryPerformanceCounter(&counter);
    14093  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    14094 }
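
// A minimal portable sketch of the same timing scheme (editorial, not part
// of the header; the recorder above is Win32-only via GetCurrentThreadId and
// QueryPerformanceCounter): a hashed thread id plus seconds elapsed since a
// fixed start point.
#include <chrono>
#include <functional>
#include <thread>

struct CallParamsSketch
{
    size_t threadId;
    double time; // Seconds since recorder start.
};

void GetBasicParamsSketch(CallParamsSketch& outParams,
    std::chrono::steady_clock::time_point startTime)
{
    // Hash the opaque std::thread::id into an integer, like threadId above.
    outParams.threadId = std::hash<std::thread::id>{}(std::this_thread::get_id());
    // Elapsed wall-clock time in seconds, like the QPC-based computation above.
    outParams.time = std::chrono::duration<double>(
        std::chrono::steady_clock::now() - startTime).count();
}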
    14095 
    14096 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    14097 {
    14098  if(count)
    14099  {
    14100  fprintf(m_File, "%p", pItems[0]);
    14101  for(uint64_t i = 1; i < count; ++i)
    14102  {
    14103  fprintf(m_File, " %p", pItems[i]);
    14104  }
    14105  }
    14106 }
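
// Example (editorial): for three allocations, PrintPointerList emits a
// single CSV field of space-separated pointer values, e.g.
// "000001A0 000001B0 000001C0".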
    14107 
    14108 void VmaRecorder::Flush()
    14109 {
    14110  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    14111  {
    14112  fflush(m_File);
    14113  }
    14114 }
    14115 
    14116 #endif // #if VMA_RECORDING_ENABLED
    14117 
 14118 ////////////////////////////////////////////////////////////////////////////////
 14119 // VmaAllocationObjectAllocator
    14120 
    14121 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    14122  m_Allocator(pAllocationCallbacks, 1024)
    14123 {
    14124 }
    14125 
    14126 VmaAllocation VmaAllocationObjectAllocator::Allocate()
    14127 {
    14128  VmaMutexLock mutexLock(m_Mutex);
    14129  return m_Allocator.Alloc();
    14130 }
    14131 
    14132 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    14133 {
    14134  VmaMutexLock mutexLock(m_Mutex);
    14135  m_Allocator.Free(hAlloc);
    14136 }
    14137 
 14138 ////////////////////////////////////////////////////////////////////////////////
 14139 // VmaAllocator_T
    14140 
    14141 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14142  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14143  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14144  m_hDevice(pCreateInfo->device),
    14145  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14146  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14147  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14148  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14149  m_PreferredLargeHeapBlockSize(0),
    14150  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14151  m_CurrentFrameIndex(0),
    14152  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14153  m_NextPoolId(0)
 14154 #if VMA_RECORDING_ENABLED
 14155  ,m_pRecorder(VMA_NULL)
    14156 #endif
    14157 {
    14158  if(VMA_DEBUG_DETECT_CORRUPTION)
    14159  {
 14160  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14161  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14162  }
    14163 
    14164  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14165 
    14166 #if !(VMA_DEDICATED_ALLOCATION)
 14167  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
 14168  {
    14169  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14170  }
    14171 #endif
    14172 
 14173  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    14174  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14175  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14176 
    14177  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14178  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14179 
    14180  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14181  {
    14182  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14183  }
    14184 
    14185  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14186  {
    14187  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14188  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14189  }
    14190 
    14191  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14192 
    14193  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14194  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14195 
    14196  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14197  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14198  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14199  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14200 
    14201  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14202  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14203 
    14204  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14205  {
    14206  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14207  {
    14208  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14209  if(limit != VK_WHOLE_SIZE)
    14210  {
    14211  m_HeapSizeLimit[heapIndex] = limit;
    14212  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14213  {
    14214  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14215  }
    14216  }
    14217  }
    14218  }
    14219 
    14220  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14221  {
    14222  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14223 
    14224  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14225  this,
    14226  VK_NULL_HANDLE, // hParentPool
    14227  memTypeIndex,
    14228  preferredBlockSize,
    14229  0,
    14230  SIZE_MAX,
    14231  GetBufferImageGranularity(),
    14232  pCreateInfo->frameInUseCount,
    14233  false, // isCustomPool
    14234  false, // explicitBlockSize
    14235  false); // linearAlgorithm
 14236  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
 14237  // because minBlockCount is 0.
    14238  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14239 
    14240  }
    14241 }
    14242 
    14243 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14244 {
    14245  VkResult res = VK_SUCCESS;
    14246 
    14247  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14248  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14249  {
    14250 #if VMA_RECORDING_ENABLED
    14251  m_pRecorder = vma_new(this, VmaRecorder)();
    14252  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14253  if(res != VK_SUCCESS)
    14254  {
    14255  return res;
    14256  }
    14257  m_pRecorder->WriteConfiguration(
    14258  m_PhysicalDeviceProperties,
    14259  m_MemProps,
    14260  m_UseKhrDedicatedAllocation);
    14261  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14262 #else
    14263  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14264  return VK_ERROR_FEATURE_NOT_PRESENT;
    14265 #endif
    14266  }
    14267 
    14268  return res;
    14269 }
    14270 
    14271 VmaAllocator_T::~VmaAllocator_T()
    14272 {
    14273 #if VMA_RECORDING_ENABLED
    14274  if(m_pRecorder != VMA_NULL)
    14275  {
    14276  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14277  vma_delete(this, m_pRecorder);
    14278  }
    14279 #endif
    14280 
    14281  VMA_ASSERT(m_Pools.empty());
    14282 
    14283  for(size_t i = GetMemoryTypeCount(); i--; )
    14284  {
    14285  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
    14286  {
    14287  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
    14288  }
    14289 
    14290  vma_delete(this, m_pDedicatedAllocations[i]);
    14291  vma_delete(this, m_pBlockVectors[i]);
    14292  }
    14293 }
    14294 
    14295 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    14296 {
    14297 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14298  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    14299  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    14300  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    14301  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    14302  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    14303  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    14304  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    14305  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    14306  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    14307  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    14308  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    14309  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    14310  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    14311  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    14312  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    14313  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    14314  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
    14315 #if VMA_DEDICATED_ALLOCATION
    14316  if(m_UseKhrDedicatedAllocation)
    14317  {
    14318  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    14319  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    14320  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    14321  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    14322  }
    14323 #endif // #if VMA_DEDICATED_ALLOCATION
    14324 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14325 
    14326 #define VMA_COPY_IF_NOT_NULL(funcName) \
    14327  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    14328 
    14329  if(pVulkanFunctions != VMA_NULL)
    14330  {
    14331  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    14332  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    14333  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    14334  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    14335  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    14336  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    14337  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    14338  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    14339  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    14340  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    14341  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    14342  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    14343  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    14344  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    14345  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    14346  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    14347  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
    14348 #if VMA_DEDICATED_ALLOCATION
    14349  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    14350  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    14351 #endif
    14352  }
    14353 
    14354 #undef VMA_COPY_IF_NOT_NULL
    14355 
    14356  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    14357  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    14358  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    14359  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    14360  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    14361  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    14362  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    14363  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    14364  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    14365  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    14366  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    14367  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    14368  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    14369  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    14370  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    14371  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    14372  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    14373  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    14374  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
    14375 #if VMA_DEDICATED_ALLOCATION
    14376  if(m_UseKhrDedicatedAllocation)
    14377  {
    14378  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    14379  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    14380  }
    14381 #endif
    14382 }
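
// Usage sketch (editorial, not part of the header): supplying the function
// pointers yourself, as the asserts above require when
// VMA_STATIC_VULKAN_FUNCTIONS is not 1. Statically linked core entry points
// are used here; physicalDevice and device are assumed to be valid handles.
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
vulkanFunctions.vkFreeMemory = &vkFreeMemory;
vulkanFunctions.vkMapMemory = &vkMapMemory;
vulkanFunctions.vkUnmapMemory = &vkUnmapMemory;
vulkanFunctions.vkFlushMappedMemoryRanges = &vkFlushMappedMemoryRanges;
vulkanFunctions.vkInvalidateMappedMemoryRanges = &vkInvalidateMappedMemoryRanges;
vulkanFunctions.vkBindBufferMemory = &vkBindBufferMemory;
vulkanFunctions.vkBindImageMemory = &vkBindImageMemory;
vulkanFunctions.vkGetBufferMemoryRequirements = &vkGetBufferMemoryRequirements;
vulkanFunctions.vkGetImageMemoryRequirements = &vkGetImageMemoryRequirements;
vulkanFunctions.vkCreateBuffer = &vkCreateBuffer;
vulkanFunctions.vkDestroyBuffer = &vkDestroyBuffer;
vulkanFunctions.vkCreateImage = &vkCreateImage;
vulkanFunctions.vkDestroyImage = &vkDestroyImage;
vulkanFunctions.vkCmdCopyBuffer = &vkCmdCopyBuffer;

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.pVulkanFunctions = &vulkanFunctions;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);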
    14383 
    14384 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14385 {
    14386  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14387  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14388  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14389  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14390 }
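
// Worked example (editorial, assuming the default VMA_SMALL_HEAP_MAX_SIZE of
// 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB): a 256 MiB heap
// counts as small, so its preferred block size is 256 MiB / 8 = 32 MiB; an
// 8 GiB heap simply gets the 256 MiB default.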
    14391 
    14392 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14393  VkDeviceSize size,
    14394  VkDeviceSize alignment,
    14395  bool dedicatedAllocation,
    14396  VkBuffer dedicatedBuffer,
    14397  VkImage dedicatedImage,
    14398  const VmaAllocationCreateInfo& createInfo,
    14399  uint32_t memTypeIndex,
    14400  VmaSuballocationType suballocType,
    14401  size_t allocationCount,
    14402  VmaAllocation* pAllocations)
    14403 {
    14404  VMA_ASSERT(pAllocations != VMA_NULL);
    14405  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14406 
    14407  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14408 
    14409  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14410  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14411  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14412  {
    14413  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14414  }
    14415 
    14416  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14417  VMA_ASSERT(blockVector);
    14418 
    14419  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14420  bool preferDedicatedMemory =
    14421  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14422  dedicatedAllocation ||
 14423  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
    14424  size > preferredBlockSize / 2;
    14425 
    14426  if(preferDedicatedMemory &&
    14427  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14428  finalCreateInfo.pool == VK_NULL_HANDLE)
    14429  {
 14430  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
 14431  }
    14432 
    14433  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14434  {
    14435  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14436  {
    14437  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14438  }
    14439  else
    14440  {
    14441  return AllocateDedicatedMemory(
    14442  size,
    14443  suballocType,
    14444  memTypeIndex,
    14445  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14446  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14447  finalCreateInfo.pUserData,
    14448  dedicatedBuffer,
    14449  dedicatedImage,
    14450  allocationCount,
    14451  pAllocations);
    14452  }
    14453  }
    14454  else
    14455  {
    14456  VkResult res = blockVector->Allocate(
    14457  m_CurrentFrameIndex.load(),
    14458  size,
    14459  alignment,
    14460  finalCreateInfo,
    14461  suballocType,
    14462  allocationCount,
    14463  pAllocations);
    14464  if(res == VK_SUCCESS)
    14465  {
    14466  return res;
    14467  }
    14468 
 14469  // Try dedicated memory.
    14470  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14471  {
    14472  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14473  }
    14474  else
    14475  {
    14476  res = AllocateDedicatedMemory(
    14477  size,
    14478  suballocType,
    14479  memTypeIndex,
    14480  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14481  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14482  finalCreateInfo.pUserData,
    14483  dedicatedBuffer,
    14484  dedicatedImage,
    14485  allocationCount,
    14486  pAllocations);
    14487  if(res == VK_SUCCESS)
    14488  {
 14489  // Succeeded: the AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
    14490  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14491  return VK_SUCCESS;
    14492  }
    14493  else
    14494  {
    14495  // Everything failed: Return error code.
    14496  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14497  return res;
    14498  }
    14499  }
    14500  }
    14501 }
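
// Worked example (editorial): with a 256 MiB preferred block size, a single
// 150 MiB request exceeds preferredBlockSize / 2 = 128 MiB, so the heuristic
// above switches on VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and the
// request gets its own VkDeviceMemory instead of a suballocation from a
// block.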
    14502 
    14503 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14504  VkDeviceSize size,
    14505  VmaSuballocationType suballocType,
    14506  uint32_t memTypeIndex,
    14507  bool map,
    14508  bool isUserDataString,
    14509  void* pUserData,
    14510  VkBuffer dedicatedBuffer,
    14511  VkImage dedicatedImage,
    14512  size_t allocationCount,
    14513  VmaAllocation* pAllocations)
    14514 {
    14515  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14516 
    14517  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14518  allocInfo.memoryTypeIndex = memTypeIndex;
    14519  allocInfo.allocationSize = size;
    14520 
    14521 #if VMA_DEDICATED_ALLOCATION
    14522  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14523  if(m_UseKhrDedicatedAllocation)
    14524  {
    14525  if(dedicatedBuffer != VK_NULL_HANDLE)
    14526  {
    14527  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14528  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14529  allocInfo.pNext = &dedicatedAllocInfo;
    14530  }
    14531  else if(dedicatedImage != VK_NULL_HANDLE)
    14532  {
    14533  dedicatedAllocInfo.image = dedicatedImage;
    14534  allocInfo.pNext = &dedicatedAllocInfo;
    14535  }
    14536  }
    14537 #endif // #if VMA_DEDICATED_ALLOCATION
    14538 
    14539  size_t allocIndex;
    14540  VkResult res = VK_SUCCESS;
    14541  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14542  {
    14543  res = AllocateDedicatedMemoryPage(
    14544  size,
    14545  suballocType,
    14546  memTypeIndex,
    14547  allocInfo,
    14548  map,
    14549  isUserDataString,
    14550  pUserData,
    14551  pAllocations + allocIndex);
    14552  if(res != VK_SUCCESS)
    14553  {
    14554  break;
    14555  }
    14556  }
    14557 
    14558  if(res == VK_SUCCESS)
    14559  {
    14560  // Register them in m_pDedicatedAllocations.
    14561  {
    14562  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14563  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14564  VMA_ASSERT(pDedicatedAllocations);
    14565  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14566  {
    14567  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14568  }
    14569  }
    14570 
    14571  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14572  }
    14573  else
    14574  {
    14575  // Free all already created allocations.
    14576  while(allocIndex--)
    14577  {
    14578  VmaAllocation currAlloc = pAllocations[allocIndex];
    14579  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14580 
    14581  /*
 14582  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
 14583  before vkFreeMemory.
    14584 
    14585  if(currAlloc->GetMappedData() != VMA_NULL)
    14586  {
    14587  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14588  }
    14589  */
    14590 
    14591  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14592 
    14593  currAlloc->SetUserData(this, VMA_NULL);
    14594  currAlloc->Dtor();
    14595  m_AllocationObjectAllocator.Free(currAlloc);
    14596  }
    14597 
    14598  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14599  }
    14600 
    14601  return res;
    14602 }
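
// Usage sketch (editorial): explicitly requesting the dedicated path from
// user code. bufferCreateInfo and allocator are assumed to exist already.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buffer;
VmaAllocation allocation;
VkResult result = vmaCreateBuffer(allocator, &bufferCreateInfo,
    &allocCreateInfo, &buffer, &allocation, nullptr);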
    14603 
    14604 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14605  VkDeviceSize size,
    14606  VmaSuballocationType suballocType,
    14607  uint32_t memTypeIndex,
    14608  const VkMemoryAllocateInfo& allocInfo,
    14609  bool map,
    14610  bool isUserDataString,
    14611  void* pUserData,
    14612  VmaAllocation* pAllocation)
    14613 {
    14614  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14615  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14616  if(res < 0)
    14617  {
    14618  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14619  return res;
    14620  }
    14621 
    14622  void* pMappedData = VMA_NULL;
    14623  if(map)
    14624  {
    14625  res = (*m_VulkanFunctions.vkMapMemory)(
    14626  m_hDevice,
    14627  hMemory,
    14628  0,
    14629  VK_WHOLE_SIZE,
    14630  0,
    14631  &pMappedData);
    14632  if(res < 0)
    14633  {
    14634  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14635  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14636  return res;
    14637  }
    14638  }
    14639 
    14640  *pAllocation = m_AllocationObjectAllocator.Allocate();
    14641  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    14642  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14643  (*pAllocation)->SetUserData(this, pUserData);
    14644  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14645  {
    14646  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14647  }
    14648 
    14649  return VK_SUCCESS;
    14650 }
    14651 
    14652 void VmaAllocator_T::GetBufferMemoryRequirements(
    14653  VkBuffer hBuffer,
    14654  VkMemoryRequirements& memReq,
    14655  bool& requiresDedicatedAllocation,
    14656  bool& prefersDedicatedAllocation) const
    14657 {
    14658 #if VMA_DEDICATED_ALLOCATION
    14659  if(m_UseKhrDedicatedAllocation)
    14660  {
    14661  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14662  memReqInfo.buffer = hBuffer;
    14663 
    14664  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14665 
    14666  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14667  memReq2.pNext = &memDedicatedReq;
    14668 
    14669  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14670 
    14671  memReq = memReq2.memoryRequirements;
    14672  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14673  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14674  }
    14675  else
    14676 #endif // #if VMA_DEDICATED_ALLOCATION
    14677  {
    14678  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14679  requiresDedicatedAllocation = false;
    14680  prefersDedicatedAllocation = false;
    14681  }
    14682 }
    14683 
    14684 void VmaAllocator_T::GetImageMemoryRequirements(
    14685  VkImage hImage,
    14686  VkMemoryRequirements& memReq,
    14687  bool& requiresDedicatedAllocation,
    14688  bool& prefersDedicatedAllocation) const
    14689 {
    14690 #if VMA_DEDICATED_ALLOCATION
    14691  if(m_UseKhrDedicatedAllocation)
    14692  {
    14693  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14694  memReqInfo.image = hImage;
    14695 
    14696  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14697 
    14698  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14699  memReq2.pNext = &memDedicatedReq;
    14700 
    14701  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14702 
    14703  memReq = memReq2.memoryRequirements;
    14704  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14705  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14706  }
    14707  else
    14708 #endif // #if VMA_DEDICATED_ALLOCATION
    14709  {
    14710  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14711  requiresDedicatedAllocation = false;
    14712  prefersDedicatedAllocation = false;
    14713  }
    14714 }
    14715 
    14716 VkResult VmaAllocator_T::AllocateMemory(
    14717  const VkMemoryRequirements& vkMemReq,
    14718  bool requiresDedicatedAllocation,
    14719  bool prefersDedicatedAllocation,
    14720  VkBuffer dedicatedBuffer,
    14721  VkImage dedicatedImage,
    14722  const VmaAllocationCreateInfo& createInfo,
    14723  VmaSuballocationType suballocType,
    14724  size_t allocationCount,
    14725  VmaAllocation* pAllocations)
    14726 {
    14727  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14728 
    14729  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14730 
    14731  if(vkMemReq.size == 0)
    14732  {
    14733  return VK_ERROR_VALIDATION_FAILED_EXT;
    14734  }
    14735  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14736  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14737  {
    14738  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14739  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14740  }
    14741  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
 14742  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
 14743  {
    14744  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14745  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14746  }
    14747  if(requiresDedicatedAllocation)
    14748  {
    14749  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14750  {
    14751  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14752  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14753  }
    14754  if(createInfo.pool != VK_NULL_HANDLE)
    14755  {
    14756  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14757  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14758  }
    14759  }
    14760  if((createInfo.pool != VK_NULL_HANDLE) &&
    14761  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14762  {
    14763  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14764  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14765  }
    14766 
    14767  if(createInfo.pool != VK_NULL_HANDLE)
    14768  {
    14769  const VkDeviceSize alignmentForPool = VMA_MAX(
    14770  vkMemReq.alignment,
    14771  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14772  return createInfo.pool->m_BlockVector.Allocate(
    14773  m_CurrentFrameIndex.load(),
    14774  vkMemReq.size,
    14775  alignmentForPool,
    14776  createInfo,
    14777  suballocType,
    14778  allocationCount,
    14779  pAllocations);
    14780  }
    14781  else
    14782  {
 14783  // Bit mask of Vulkan memory types acceptable for this allocation.
    14784  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14785  uint32_t memTypeIndex = UINT32_MAX;
    14786  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14787  if(res == VK_SUCCESS)
    14788  {
    14789  VkDeviceSize alignmentForMemType = VMA_MAX(
    14790  vkMemReq.alignment,
    14791  GetMemoryTypeMinAlignment(memTypeIndex));
    14792 
    14793  res = AllocateMemoryOfType(
    14794  vkMemReq.size,
    14795  alignmentForMemType,
    14796  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14797  dedicatedBuffer,
    14798  dedicatedImage,
    14799  createInfo,
    14800  memTypeIndex,
    14801  suballocType,
    14802  allocationCount,
    14803  pAllocations);
    14804  // Succeeded on first try.
    14805  if(res == VK_SUCCESS)
    14806  {
    14807  return res;
    14808  }
    14809  // Allocation from this memory type failed. Try other compatible memory types.
    14810  else
    14811  {
    14812  for(;;)
    14813  {
    14814  // Remove old memTypeIndex from list of possibilities.
    14815  memoryTypeBits &= ~(1u << memTypeIndex);
    14816  // Find alternative memTypeIndex.
    14817  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14818  if(res == VK_SUCCESS)
    14819  {
    14820  alignmentForMemType = VMA_MAX(
    14821  vkMemReq.alignment,
    14822  GetMemoryTypeMinAlignment(memTypeIndex));
    14823 
    14824  res = AllocateMemoryOfType(
    14825  vkMemReq.size,
    14826  alignmentForMemType,
    14827  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14828  dedicatedBuffer,
    14829  dedicatedImage,
    14830  createInfo,
    14831  memTypeIndex,
    14832  suballocType,
    14833  allocationCount,
    14834  pAllocations);
    14835  // Allocation from this alternative memory type succeeded.
    14836  if(res == VK_SUCCESS)
    14837  {
    14838  return res;
    14839  }
    14840  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14841  }
    14842  // No other matching memory type index could be found.
    14843  else
    14844  {
    14845  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14846  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14847  }
    14848  }
    14849  }
    14850  }
 14851  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14852  else
    14853  return res;
    14854  }
    14855 }
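
// Worked example (editorial): if vkMemReq.memoryTypeBits is 0b1101 and
// allocation from type 0 fails, the fallback loop above clears that bit
// (0b1101 & ~(1u << 0) == 0b1100) and asks vmaFindMemoryTypeIndex again,
// repeating until some type succeeds or no acceptable type remains.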
    14856 
    14857 void VmaAllocator_T::FreeMemory(
    14858  size_t allocationCount,
    14859  const VmaAllocation* pAllocations)
    14860 {
    14861  VMA_ASSERT(pAllocations);
    14862 
    14863  for(size_t allocIndex = allocationCount; allocIndex--; )
    14864  {
    14865  VmaAllocation allocation = pAllocations[allocIndex];
    14866 
    14867  if(allocation != VK_NULL_HANDLE)
    14868  {
    14869  if(TouchAllocation(allocation))
    14870  {
    14871  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14872  {
    14873  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14874  }
    14875 
    14876  switch(allocation->GetType())
    14877  {
    14878  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14879  {
    14880  VmaBlockVector* pBlockVector = VMA_NULL;
    14881  VmaPool hPool = allocation->GetBlock()->GetParentPool();
    14882  if(hPool != VK_NULL_HANDLE)
    14883  {
    14884  pBlockVector = &hPool->m_BlockVector;
    14885  }
    14886  else
    14887  {
    14888  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14889  pBlockVector = m_pBlockVectors[memTypeIndex];
    14890  }
    14891  pBlockVector->Free(allocation);
    14892  }
    14893  break;
    14894  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14895  FreeDedicatedMemory(allocation);
    14896  break;
    14897  default:
    14898  VMA_ASSERT(0);
    14899  }
    14900  }
    14901 
    14902  allocation->SetUserData(this, VMA_NULL);
    14903  allocation->Dtor();
    14904  m_AllocationObjectAllocator.Free(allocation);
    14905  }
    14906  }
    14907 }
    14908 
    14909 VkResult VmaAllocator_T::ResizeAllocation(
    14910  const VmaAllocation alloc,
    14911  VkDeviceSize newSize)
    14912 {
    14913  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14914  {
    14915  return VK_ERROR_VALIDATION_FAILED_EXT;
    14916  }
    14917  if(newSize == alloc->GetSize())
    14918  {
    14919  return VK_SUCCESS;
    14920  }
    14921 
    14922  switch(alloc->GetType())
    14923  {
    14924  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14925  return VK_ERROR_FEATURE_NOT_PRESENT;
    14926  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14927  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14928  {
    14929  alloc->ChangeSize(newSize);
    14930  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    14931  return VK_SUCCESS;
    14932  }
    14933  else
    14934  {
    14935  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14936  }
    14937  default:
    14938  VMA_ASSERT(0);
    14939  return VK_ERROR_VALIDATION_FAILED_EXT;
    14940  }
    14941 }
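
// Usage sketch (editorial): resizing a block-based allocation in place.
// allocator, allocation and newSizeInBytes are assumed to exist; dedicated
// allocations always fail with VK_ERROR_FEATURE_NOT_PRESENT.
VkResult res = vmaResizeAllocation(allocator, allocation, newSizeInBytes);
if(res == VK_ERROR_OUT_OF_POOL_MEMORY)
{
    // The block cannot grow this suballocation: allocate anew and copy.
}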
    14942 
    14943 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14944 {
    14945  // Initialize.
    14946  InitStatInfo(pStats->total);
    14947  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14948  InitStatInfo(pStats->memoryType[i]);
    14949  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14950  InitStatInfo(pStats->memoryHeap[i]);
    14951 
    14952  // Process default pools.
    14953  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14954  {
    14955  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14956  VMA_ASSERT(pBlockVector);
    14957  pBlockVector->AddStats(pStats);
    14958  }
    14959 
    14960  // Process custom pools.
    14961  {
    14962  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14963  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14964  {
    14965  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    14966  }
    14967  }
    14968 
    14969  // Process dedicated allocations.
    14970  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14971  {
    14972  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14973  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14974  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    14975  VMA_ASSERT(pDedicatedAllocVector);
    14976  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    14977  {
    14978  VmaStatInfo allocationStatInfo;
    14979  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    14980  VmaAddStatInfo(pStats->total, allocationStatInfo);
    14981  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    14982  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    14983  }
    14984  }
    14985 
    14986  // Postprocess.
    14987  VmaPostprocessCalcStatInfo(pStats->total);
    14988  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    14989  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    14990  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    14991  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    14992 }
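
// Usage sketch (editorial): reading the totals this function aggregates.
VmaStats stats;
vmaCalculateStats(allocator, &stats);
printf("used: %llu B, unused: %llu B\n",
    (unsigned long long)stats.total.usedBytes,
    (unsigned long long)stats.total.unusedBytes);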
    14993 
    14994 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    14995 
    14996 VkResult VmaAllocator_T::DefragmentationBegin(
    14997  const VmaDefragmentationInfo2& info,
    14998  VmaDefragmentationStats* pStats,
    14999  VmaDefragmentationContext* pContext)
    15000 {
    15001  if(info.pAllocationsChanged != VMA_NULL)
    15002  {
    15003  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    15004  }
    15005 
    15006  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    15007  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    15008 
    15009  (*pContext)->AddPools(info.poolCount, info.pPools);
    15010  (*pContext)->AddAllocations(
 15011  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
 15012 
    15013  VkResult res = (*pContext)->Defragment(
 15014  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
 15015  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
 15016  info.commandBuffer, pStats);
    15017 
    15018  if(res != VK_NOT_READY)
    15019  {
    15020  vma_delete(this, *pContext);
    15021  *pContext = VMA_NULL;
    15022  }
    15023 
    15024  return res;
    15025 }
    15026 
    15027 VkResult VmaAllocator_T::DefragmentationEnd(
    15028  VmaDefragmentationContext context)
    15029 {
    15030  vma_delete(this, context);
    15031  return VK_SUCCESS;
    15032 }
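
// Usage sketch (editorial): the begin/end pair around a CPU-side
// defragmentation pass. allocations and allocCount are assumed to exist; no
// command buffer is needed when only CPU moves are allowed.
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);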
    15033 
    15034 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    15035 {
    15036  if(hAllocation->CanBecomeLost())
    15037  {
    15038  /*
    15039  Warning: This is a carefully designed algorithm.
    15040  Do not modify unless you really know what you're doing :)
    15041  */
    15042  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15043  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15044  for(;;)
    15045  {
    15046  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15047  {
    15048  pAllocationInfo->memoryType = UINT32_MAX;
    15049  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    15050  pAllocationInfo->offset = 0;
    15051  pAllocationInfo->size = hAllocation->GetSize();
    15052  pAllocationInfo->pMappedData = VMA_NULL;
    15053  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15054  return;
    15055  }
    15056  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15057  {
    15058  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15059  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15060  pAllocationInfo->offset = hAllocation->GetOffset();
    15061  pAllocationInfo->size = hAllocation->GetSize();
    15062  pAllocationInfo->pMappedData = VMA_NULL;
    15063  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15064  return;
    15065  }
    15066  else // Last use time earlier than current time.
    15067  {
    15068  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15069  {
    15070  localLastUseFrameIndex = localCurrFrameIndex;
    15071  }
    15072  }
    15073  }
    15074  }
    15075  else
    15076  {
    15077 #if VMA_STATS_STRING_ENABLED
    15078  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15079  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15080  for(;;)
    15081  {
    15082  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15083  if(localLastUseFrameIndex == localCurrFrameIndex)
    15084  {
    15085  break;
    15086  }
    15087  else // Last use time earlier than current time.
    15088  {
    15089  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15090  {
    15091  localLastUseFrameIndex = localCurrFrameIndex;
    15092  }
    15093  }
    15094  }
    15095 #endif
    15096 
    15097  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15098  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15099  pAllocationInfo->offset = hAllocation->GetOffset();
    15100  pAllocationInfo->size = hAllocation->GetSize();
    15101  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    15102  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15103  }
    15104 }
    15105 
    15106 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    15107 {
    15108  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    15109  if(hAllocation->CanBecomeLost())
    15110  {
    15111  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15112  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15113  for(;;)
    15114  {
    15115  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15116  {
    15117  return false;
    15118  }
    15119  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15120  {
    15121  return true;
    15122  }
    15123  else // Last use time earlier than current time.
    15124  {
    15125  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15126  {
    15127  localLastUseFrameIndex = localCurrFrameIndex;
    15128  }
    15129  }
    15130  }
    15131  }
    15132  else
    15133  {
    15134 #if VMA_STATS_STRING_ENABLED
    15135  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15136  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15137  for(;;)
    15138  {
    15139  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15140  if(localLastUseFrameIndex == localCurrFrameIndex)
    15141  {
    15142  break;
    15143  }
    15144  else // Last use time earlier than current time.
    15145  {
    15146  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15147  {
    15148  localLastUseFrameIndex = localCurrFrameIndex;
    15149  }
    15150  }
    15151  }
    15152 #endif
    15153 
    15154  return true;
    15155  }
    15156 }
    15157 
    15158 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15159 {
    15160  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15161 
    15162  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15163 
    15164  if(newCreateInfo.maxBlockCount == 0)
    15165  {
    15166  newCreateInfo.maxBlockCount = SIZE_MAX;
    15167  }
    15168  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15169  {
    15170  return VK_ERROR_INITIALIZATION_FAILED;
    15171  }
    15172 
    15173  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15174 
    15175  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15176 
    15177  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15178  if(res != VK_SUCCESS)
    15179  {
    15180  vma_delete(this, *pPool);
    15181  *pPool = VMA_NULL;
    15182  return res;
    15183  }
    15184 
    15185  // Add to m_Pools.
    15186  {
    15187  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15188  (*pPool)->SetId(m_NextPoolId++);
    15189  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15190  }
    15191 
    15192  return VK_SUCCESS;
    15193 }
    15194 
    15195 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15196 {
    15197  // Remove from m_Pools.
    15198  {
    15199  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15200  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15201  VMA_ASSERT(success && "Pool not found in Allocator.");
    15202  }
    15203 
    15204  vma_delete(this, pool);
    15205 }
    15206 
    15207 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15208 {
    15209  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15210 }
    15211 
    15212 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15213 {
    15214  m_CurrentFrameIndex.store(frameIndex);
    15215 }
    15216 
    15217 void VmaAllocator_T::MakePoolAllocationsLost(
    15218  VmaPool hPool,
    15219  size_t* pLostAllocationCount)
    15220 {
    15221  hPool->m_BlockVector.MakePoolAllocationsLost(
    15222  m_CurrentFrameIndex.load(),
    15223  pLostAllocationCount);
    15224 }
    15225 
    15226 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15227 {
    15228  return hPool->m_BlockVector.CheckCorruption();
    15229 }
    15230 
    15231 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15232 {
    15233  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15234 
    15235  // Process default pools.
    15236  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15237  {
    15238  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15239  {
    15240  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15241  VMA_ASSERT(pBlockVector);
    15242  VkResult localRes = pBlockVector->CheckCorruption();
    15243  switch(localRes)
    15244  {
    15245  case VK_ERROR_FEATURE_NOT_PRESENT:
    15246  break;
    15247  case VK_SUCCESS:
    15248  finalRes = VK_SUCCESS;
    15249  break;
    15250  default:
    15251  return localRes;
    15252  }
    15253  }
    15254  }
    15255 
    15256  // Process custom pools.
    15257  {
    15258  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15259  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15260  {
    15261  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15262  {
    15263  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15264  switch(localRes)
    15265  {
    15266  case VK_ERROR_FEATURE_NOT_PRESENT:
    15267  break;
    15268  case VK_SUCCESS:
    15269  finalRes = VK_SUCCESS;
    15270  break;
    15271  default:
    15272  return localRes;
    15273  }
    15274  }
    15275  }
    15276  }
    15277 
    15278  return finalRes;
    15279 }
    15280 
    15281 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15282 {
    15283  *pAllocation = m_AllocationObjectAllocator.Allocate();
    15284  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    15285  (*pAllocation)->InitLost();
    15286 }
    15287 
    15288 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15289 {
    15290  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15291 
    15292  VkResult res;
    15293  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15294  {
    15295  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15296  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15297  {
    15298  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15299  if(res == VK_SUCCESS)
    15300  {
    15301  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15302  }
    15303  }
    15304  else
    15305  {
    15306  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15307  }
    15308  }
    15309  else
    15310  {
    15311  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15312  }
    15313 
    15314  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15315  {
    15316  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15317  }
    15318 
    15319  return res;
    15320 }
    15321 
    15322 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15323 {
    15324  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15325  {
    15326  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15327  }
    15328 
    15329  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15330 
    15331  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15332  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15333  {
    15334  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15335  m_HeapSizeLimit[heapIndex] += size;
    15336  }
    15337 }
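// The m_HeapSizeLimit bookkeeping in AllocateVulkanMemory/FreeVulkanMemory above backs
// VmaAllocatorCreateInfo::pHeapSizeLimit. A minimal sketch of capping one heap follows;
// the 256 MiB figure, heap index 0, and the physicalDevice/device handles are
// assumptions of the example, not values taken from this file.
//
//   VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
//   for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
//       heapLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit", as checked above.
//   heapLimits[0] = 256ull * 1024 * 1024; // Cap heap 0 at 256 MiB.
//
//   VmaAllocatorCreateInfo allocatorInfo = {};
//   allocatorInfo.physicalDevice = physicalDevice;
//   allocatorInfo.device = device;
//   allocatorInfo.pHeapSizeLimit = heapLimits;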
    15338 
    15339 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15340 {
    15341  if(hAllocation->CanBecomeLost())
    15342  {
    15343  return VK_ERROR_MEMORY_MAP_FAILED;
    15344  }
    15345 
    15346  switch(hAllocation->GetType())
    15347  {
    15348  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15349  {
    15350  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15351  char *pBytes = VMA_NULL;
    15352  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15353  if(res == VK_SUCCESS)
    15354  {
    15355  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15356  hAllocation->BlockAllocMap();
    15357  }
    15358  return res;
    15359  }
    15360  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15361  return hAllocation->DedicatedAllocMap(this, ppData);
    15362  default:
    15363  VMA_ASSERT(0);
    15364  return VK_ERROR_MEMORY_MAP_FAILED;
    15365  }
    15366 }
    15367 
    15368 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15369 {
    15370  switch(hAllocation->GetType())
    15371  {
    15372  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15373  {
    15374  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15375  hAllocation->BlockAllocUnmap();
    15376  pBlock->Unmap(this, 1);
    15377  }
    15378  break;
    15379  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15380  hAllocation->DedicatedAllocUnmap(this);
    15381  break;
    15382  default:
    15383  VMA_ASSERT(0);
    15384  }
    15385 }
    15386 
    15387 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15388 {
    15389  VkResult res = VK_SUCCESS;
    15390  switch(hAllocation->GetType())
    15391  {
    15392  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15393  res = GetVulkanFunctions().vkBindBufferMemory(
    15394  m_hDevice,
    15395  hBuffer,
    15396  hAllocation->GetMemory(),
    15397  0); //memoryOffset
    15398  break;
    15399  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15400  {
    15401  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15402  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15403  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15404  break;
    15405  }
    15406  default:
    15407  VMA_ASSERT(0);
    15408  }
    15409  return res;
    15410 }
    15411 
    15412 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15413 {
    15414  VkResult res = VK_SUCCESS;
    15415  switch(hAllocation->GetType())
    15416  {
    15417  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15418  res = GetVulkanFunctions().vkBindImageMemory(
    15419  m_hDevice,
    15420  hImage,
    15421  hAllocation->GetMemory(),
    15422  0); //memoryOffset
    15423  break;
    15424  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15425  {
    15426  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15427  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15428  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15429  break;
    15430  }
    15431  default:
    15432  VMA_ASSERT(0);
    15433  }
    15434  return res;
    15435 }
    15436 
    15437 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15438  VmaAllocation hAllocation,
    15439  VkDeviceSize offset, VkDeviceSize size,
    15440  VMA_CACHE_OPERATION op)
    15441 {
    15442  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15443  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15444  {
    15445  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15446  VMA_ASSERT(offset <= allocationSize);
    15447 
    15448  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15449 
    15450  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15451  memRange.memory = hAllocation->GetMemory();
    15452 
    15453  switch(hAllocation->GetType())
    15454  {
    15455  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15456  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15457  if(size == VK_WHOLE_SIZE)
    15458  {
    15459  memRange.size = allocationSize - memRange.offset;
    15460  }
    15461  else
    15462  {
    15463  VMA_ASSERT(offset + size <= allocationSize);
    15464  memRange.size = VMA_MIN(
    15465  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15466  allocationSize - memRange.offset);
    15467  }
    15468  break;
    15469 
    15470  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15471  {
    15472  // 1. Still within this allocation.
    15473  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15474  if(size == VK_WHOLE_SIZE)
    15475  {
    15476  size = allocationSize - offset;
    15477  }
    15478  else
    15479  {
    15480  VMA_ASSERT(offset + size <= allocationSize);
    15481  }
    15482  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15483 
    15484  // 2. Adjust to whole block.
    15485  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15486  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15487  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15488  memRange.offset += allocationOffset;
    15489  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15490 
    15491  break;
    15492  }
    15493 
    15494  default:
    15495  VMA_ASSERT(0);
    15496  }
    15497 
    15498  switch(op)
    15499  {
    15500  case VMA_CACHE_FLUSH:
    15501  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15502  break;
    15503  case VMA_CACHE_INVALIDATE:
    15504  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15505  break;
    15506  default:
    15507  VMA_ASSERT(0);
    15508  }
    15509  }
    15510  // else: Just ignore this call.
    15511 }
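// Worked example of the rounding above (all values assumed for illustration): with
// nonCoherentAtomSize = 64 and a dedicated allocation of at least 320 bytes, a request
// of offset = 100, size = 200 becomes:
//
//   memRange.offset = VmaAlignDown(100, 64) = 64
//   memRange.size   = VmaAlignUp(200 + (100 - 64), 64) = VmaAlignUp(236, 64) = 256
//
// i.e. the flushed/invalidated range [64, 320) fully covers the requested [100, 300)
// while respecting VkPhysicalDeviceLimits::nonCoherentAtomSize.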
    15512 
    15513 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15514 {
    15515  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15516 
    15517  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15518  {
    15519  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15520  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15521  VMA_ASSERT(pDedicatedAllocations);
    15522  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15523  VMA_ASSERT(success);
    15524  }
    15525 
    15526  VkDeviceMemory hMemory = allocation->GetMemory();
    15527 
    15528  /*
    15529  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    15530  before vkFreeMemory.
    15531 
    15532  if(allocation->GetMappedData() != VMA_NULL)
    15533  {
    15534  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15535  }
    15536  */
    15537 
    15538  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15539 
    15540  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15541 }
    15542 
    15543 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15544 {
    15545  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15546  !hAllocation->CanBecomeLost() &&
    15547  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15548  {
    15549  void* pData = VMA_NULL;
    15550  VkResult res = Map(hAllocation, &pData);
    15551  if(res == VK_SUCCESS)
    15552  {
    15553  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15554  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15555  Unmap(hAllocation);
    15556  }
    15557  else
    15558  {
    15559  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15560  }
    15561  }
    15562 }
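// Note: FillAllocation above only does anything when the library is compiled with the
// VMA_DEBUG_INITIALIZE_ALLOCATIONS configuration macro enabled, e.g. (a sketch):
//
//   #define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//
// Host-visible allocations are then filled with a bit pattern, which helps catch use of
// uninitialized or already-freed memory.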
    15563 
    15564 #if VMA_STATS_STRING_ENABLED
    15565 
    15566 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15567 {
    15568  bool dedicatedAllocationsStarted = false;
    15569  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15570  {
    15571  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15572  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15573  VMA_ASSERT(pDedicatedAllocVector);
    15574  if(pDedicatedAllocVector->empty() == false)
    15575  {
    15576  if(dedicatedAllocationsStarted == false)
    15577  {
    15578  dedicatedAllocationsStarted = true;
    15579  json.WriteString("DedicatedAllocations");
    15580  json.BeginObject();
    15581  }
    15582 
    15583  json.BeginString("Type ");
    15584  json.ContinueString(memTypeIndex);
    15585  json.EndString();
    15586 
    15587  json.BeginArray();
    15588 
    15589  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15590  {
    15591  json.BeginObject(true);
    15592  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15593  hAlloc->PrintParameters(json);
    15594  json.EndObject();
    15595  }
    15596 
    15597  json.EndArray();
    15598  }
    15599  }
    15600  if(dedicatedAllocationsStarted)
    15601  {
    15602  json.EndObject();
    15603  }
    15604 
    15605  {
    15606  bool allocationsStarted = false;
    15607  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15608  {
    15609  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15610  {
    15611  if(allocationsStarted == false)
    15612  {
    15613  allocationsStarted = true;
    15614  json.WriteString("DefaultPools");
    15615  json.BeginObject();
    15616  }
    15617 
    15618  json.BeginString("Type ");
    15619  json.ContinueString(memTypeIndex);
    15620  json.EndString();
    15621 
    15622  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15623  }
    15624  }
    15625  if(allocationsStarted)
    15626  {
    15627  json.EndObject();
    15628  }
    15629  }
    15630 
    15631  // Custom pools
    15632  {
    15633  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15634  const size_t poolCount = m_Pools.size();
    15635  if(poolCount > 0)
    15636  {
    15637  json.WriteString("Pools");
    15638  json.BeginObject();
    15639  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15640  {
    15641  json.BeginString();
    15642  json.ContinueString(m_Pools[poolIndex]->GetId());
    15643  json.EndString();
    15644 
    15645  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15646  }
    15647  json.EndObject();
    15648  }
    15649  }
    15650 }
    15651 
    15652 #endif // #if VMA_STATS_STRING_ENABLED
    15653 
    15654 //////////////////////////////////////////////////////////////////////////////////////////////////
    15655 // Public interface
    15656 
    15657 VkResult vmaCreateAllocator(
    15658  const VmaAllocatorCreateInfo* pCreateInfo,
    15659  VmaAllocator* pAllocator)
    15660 {
    15661  VMA_ASSERT(pCreateInfo && pAllocator);
    15662  VMA_DEBUG_LOG("vmaCreateAllocator");
    15663  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15664  return (*pAllocator)->Init(pCreateInfo);
    15665 }
    15666 
    15667 void vmaDestroyAllocator(
    15668  VmaAllocator allocator)
    15669 {
    15670  if(allocator != VK_NULL_HANDLE)
    15671  {
    15672  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15673  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15674  vma_delete(&allocationCallbacks, allocator);
    15675  }
    15676 }
    15677 
    15678 void vmaGetPhysicalDeviceProperties(
    15679  VmaAllocator allocator,
    15680  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15681 {
    15682  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15683  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15684 }
    15685 
    15686 void vmaGetMemoryProperties(
    15687  VmaAllocator allocator,
    15688  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15689 {
    15690  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15691  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15692 }
    15693 
    15694 void vmaGetMemoryTypeProperties(
    15695  VmaAllocator allocator,
    15696  uint32_t memoryTypeIndex,
    15697  VkMemoryPropertyFlags* pFlags)
    15698 {
    15699  VMA_ASSERT(allocator && pFlags);
    15700  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15701  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15702 }
    15703 
    15704 void vmaSetCurrentFrameIndex(
    15705  VmaAllocator allocator,
    15706  uint32_t frameIndex)
    15707 {
    15708  VMA_ASSERT(allocator);
    15709  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15710 
    15711  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15712 
    15713  allocator->SetCurrentFrameIndex(frameIndex);
    15714 }
    15715 
    15716 void vmaCalculateStats(
    15717  VmaAllocator allocator,
    15718  VmaStats* pStats)
    15719 {
    15720  VMA_ASSERT(allocator && pStats);
    15721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15722  allocator->CalculateStats(pStats);
    15723 }
    15724 
    15725 #if VMA_STATS_STRING_ENABLED
    15726 
    15727 void vmaBuildStatsString(
    15728  VmaAllocator allocator,
    15729  char** ppStatsString,
    15730  VkBool32 detailedMap)
    15731 {
    15732  VMA_ASSERT(allocator && ppStatsString);
    15733  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15734 
    15735  VmaStringBuilder sb(allocator);
    15736  {
    15737  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15738  json.BeginObject();
    15739 
    15740  VmaStats stats;
    15741  allocator->CalculateStats(&stats);
    15742 
    15743  json.WriteString("Total");
    15744  VmaPrintStatInfo(json, stats.total);
    15745 
    15746  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15747  {
    15748  json.BeginString("Heap ");
    15749  json.ContinueString(heapIndex);
    15750  json.EndString();
    15751  json.BeginObject();
    15752 
    15753  json.WriteString("Size");
    15754  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15755 
    15756  json.WriteString("Flags");
    15757  json.BeginArray(true);
    15758  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15759  {
    15760  json.WriteString("DEVICE_LOCAL");
    15761  }
    15762  json.EndArray();
    15763 
    15764  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15765  {
    15766  json.WriteString("Stats");
    15767  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15768  }
    15769 
    15770  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15771  {
    15772  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15773  {
    15774  json.BeginString("Type ");
    15775  json.ContinueString(typeIndex);
    15776  json.EndString();
    15777 
    15778  json.BeginObject();
    15779 
    15780  json.WriteString("Flags");
    15781  json.BeginArray(true);
    15782  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15783  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15784  {
    15785  json.WriteString("DEVICE_LOCAL");
    15786  }
    15787  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15788  {
    15789  json.WriteString("HOST_VISIBLE");
    15790  }
    15791  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15792  {
    15793  json.WriteString("HOST_COHERENT");
    15794  }
    15795  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15796  {
    15797  json.WriteString("HOST_CACHED");
    15798  }
    15799  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15800  {
    15801  json.WriteString("LAZILY_ALLOCATED");
    15802  }
    15803  json.EndArray();
    15804 
    15805  if(stats.memoryType[typeIndex].blockCount > 0)
    15806  {
    15807  json.WriteString("Stats");
    15808  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15809  }
    15810 
    15811  json.EndObject();
    15812  }
    15813  }
    15814 
    15815  json.EndObject();
    15816  }
    15817  if(detailedMap == VK_TRUE)
    15818  {
    15819  allocator->PrintDetailedMap(json);
    15820  }
    15821 
    15822  json.EndObject();
    15823  }
    15824 
    15825  const size_t len = sb.GetLength();
    15826  char* const pChars = vma_new_array(allocator, char, len + 1);
    15827  if(len > 0)
    15828  {
    15829  memcpy(pChars, sb.GetData(), len);
    15830  }
    15831  pChars[len] = '\0';
    15832  *ppStatsString = pChars;
    15833 }
    15834 
    15835 void vmaFreeStatsString(
    15836  VmaAllocator allocator,
    15837  char* pStatsString)
    15838 {
    15839  if(pStatsString != VMA_NULL)
    15840  {
    15841  VMA_ASSERT(allocator);
    15842  size_t len = strlen(pStatsString);
    15843  vma_delete_array(allocator, pStatsString, len + 1);
    15844  }
    15845 }
    15846 
    15847 #endif // #if VMA_STATS_STRING_ENABLED
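// Typical usage of the vmaBuildStatsString/vmaFreeStatsString pair above (a sketch;
// `allocator` is assumed to be a valid VmaAllocator). The returned string is JSON and
// must be released with vmaFreeStatsString, which relies on the string being unmodified:
//
//   char* statsString = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
//   printf("%s\n", statsString);
//   vmaFreeStatsString(allocator, statsString);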
    15848 
    15849 /*
    15850 This function is not protected by any mutex because it just reads immutable data.
    15851 */
    15852 VkResult vmaFindMemoryTypeIndex(
    15853  VmaAllocator allocator,
    15854  uint32_t memoryTypeBits,
    15855  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15856  uint32_t* pMemoryTypeIndex)
    15857 {
    15858  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15859  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15860  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15861 
    15862  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15863  {
    15864  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15865  }
    15866 
    15867  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15868  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15869 
    15870  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    15871  if(mapped)
    15872  {
    15873  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15874  }
    15875 
    15876  // Convert usage to requiredFlags and preferredFlags.
    15877  switch(pAllocationCreateInfo->usage)
    15878  {
    15879  case VMA_MEMORY_USAGE_UNKNOWN:
    15880  break;
    15881  case VMA_MEMORY_USAGE_GPU_ONLY:
    15882  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15883  {
    15884  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15885  }
    15886  break;
    15887  case VMA_MEMORY_USAGE_CPU_ONLY:
    15888  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15889  break;
    15890  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    15891  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15892  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15893  {
    15894  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15895  }
    15896  break;
    15897  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    15898  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15899  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    15900  break;
    15901  default:
    15902  break;
    15903  }
    15904 
    15905  *pMemoryTypeIndex = UINT32_MAX;
    15906  uint32_t minCost = UINT32_MAX;
    15907  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15908  memTypeIndex < allocator->GetMemoryTypeCount();
    15909  ++memTypeIndex, memTypeBit <<= 1)
    15910  {
    15911  // This memory type is acceptable according to memoryTypeBits bitmask.
    15912  if((memTypeBit & memoryTypeBits) != 0)
    15913  {
    15914  const VkMemoryPropertyFlags currFlags =
    15915  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    15916  // This memory type contains requiredFlags.
    15917  if((requiredFlags & ~currFlags) == 0)
    15918  {
    15919  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    15920  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    15921  // Remember memory type with lowest cost.
    15922  if(currCost < minCost)
    15923  {
    15924  *pMemoryTypeIndex = memTypeIndex;
    15925  if(currCost == 0)
    15926  {
    15927  return VK_SUCCESS;
    15928  }
    15929  minCost = currCost;
    15930  }
    15931  }
    15932  }
    15933  }
    15934  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    15935 }
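// Example (a sketch): finding a HOST_VISIBLE memory type for a staging buffer. The
// memoryTypeBits value would normally come from vkGetBufferMemoryRequirements; passing
// UINT32_MAX here means "any memory type is acceptable" and is an assumption of the example.
//
//   VmaAllocationCreateInfo createInfo = {};
//   createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
//
//   uint32_t memTypeIndex;
//   VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &createInfo, &memTypeIndex);
//   // res == VK_ERROR_FEATURE_NOT_PRESENT if no type satisfies requiredFlags.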
    15936 
    15937 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    15938  VmaAllocator allocator,
    15939  const VkBufferCreateInfo* pBufferCreateInfo,
    15940  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15941  uint32_t* pMemoryTypeIndex)
    15942 {
    15943  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15944  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    15945  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15946  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15947 
    15948  const VkDevice hDev = allocator->m_hDevice;
    15949  VkBuffer hBuffer = VK_NULL_HANDLE;
    15950  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    15951  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    15952  if(res == VK_SUCCESS)
    15953  {
    15954  VkMemoryRequirements memReq = {};
    15955  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    15956  hDev, hBuffer, &memReq);
    15957 
    15958  res = vmaFindMemoryTypeIndex(
    15959  allocator,
    15960  memReq.memoryTypeBits,
    15961  pAllocationCreateInfo,
    15962  pMemoryTypeIndex);
    15963 
    15964  allocator->GetVulkanFunctions().vkDestroyBuffer(
    15965  hDev, hBuffer, allocator->GetAllocationCallbacks());
    15966  }
    15967  return res;
    15968 }
    15969 
    15970 VkResult vmaFindMemoryTypeIndexForImageInfo(
    15971  VmaAllocator allocator,
    15972  const VkImageCreateInfo* pImageCreateInfo,
    15973  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15974  uint32_t* pMemoryTypeIndex)
    15975 {
    15976  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15977  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    15978  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15979  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15980 
    15981  const VkDevice hDev = allocator->m_hDevice;
    15982  VkImage hImage = VK_NULL_HANDLE;
    15983  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    15984  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    15985  if(res == VK_SUCCESS)
    15986  {
    15987  VkMemoryRequirements memReq = {};
    15988  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    15989  hDev, hImage, &memReq);
    15990 
    15991  res = vmaFindMemoryTypeIndex(
    15992  allocator,
    15993  memReq.memoryTypeBits,
    15994  pAllocationCreateInfo,
    15995  pMemoryTypeIndex);
    15996 
    15997  allocator->GetVulkanFunctions().vkDestroyImage(
    15998  hDev, hImage, allocator->GetAllocationCallbacks());
    15999  }
    16000  return res;
    16001 }
    16002 
    16003 VkResult vmaCreatePool(
    16004  VmaAllocator allocator,
    16005  const VmaPoolCreateInfo* pCreateInfo,
    16006  VmaPool* pPool)
    16007 {
    16008  VMA_ASSERT(allocator && pCreateInfo && pPool);
    16009 
    16010  VMA_DEBUG_LOG("vmaCreatePool");
    16011 
    16012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16013 
    16014  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    16015 
    16016 #if VMA_RECORDING_ENABLED
    16017  if(allocator->GetRecorder() != VMA_NULL)
    16018  {
    16019  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    16020  }
    16021 #endif
    16022 
    16023  return res;
    16024 }
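// Example (a sketch): creating and destroying a custom pool. memTypeIndex would
// typically come from one of the vmaFindMemoryTypeIndex* functions; the block size and
// count below are arbitrary assumptions of the example.
//
//   VmaPoolCreateInfo poolInfo = {};
//   poolInfo.memoryTypeIndex = memTypeIndex;
//   poolInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block.
//   poolInfo.maxBlockCount = 2; // 0 would mean "no limit" (SIZE_MAX, see CreatePool above).
//
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);
//   // ... allocate from it via VmaAllocationCreateInfo::pool ...
//   vmaDestroyPool(allocator, pool);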
    16025 
    16026 void vmaDestroyPool(
    16027  VmaAllocator allocator,
    16028  VmaPool pool)
    16029 {
    16030  VMA_ASSERT(allocator);
    16031 
    16032  if(pool == VK_NULL_HANDLE)
    16033  {
    16034  return;
    16035  }
    16036 
    16037  VMA_DEBUG_LOG("vmaDestroyPool");
    16038 
    16039  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16040 
    16041 #if VMA_RECORDING_ENABLED
    16042  if(allocator->GetRecorder() != VMA_NULL)
    16043  {
    16044  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    16045  }
    16046 #endif
    16047 
    16048  allocator->DestroyPool(pool);
    16049 }
    16050 
    16051 void vmaGetPoolStats(
    16052  VmaAllocator allocator,
    16053  VmaPool pool,
    16054  VmaPoolStats* pPoolStats)
    16055 {
    16056  VMA_ASSERT(allocator && pool && pPoolStats);
    16057 
    16058  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16059 
    16060  allocator->GetPoolStats(pool, pPoolStats);
    16061 }
    16062 
    16063 void vmaMakePoolAllocationsLost(
    16064  VmaAllocator allocator,
    16065  VmaPool pool,
    16066  size_t* pLostAllocationCount)
    16067 {
    16068  VMA_ASSERT(allocator && pool);
    16069 
    16070  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16071 
    16072 #if VMA_RECORDING_ENABLED
    16073  if(allocator->GetRecorder() != VMA_NULL)
    16074  {
    16075  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    16076  }
    16077 #endif
    16078 
    16079  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    16080 }
    16081 
    16082 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    16083 {
    16084  VMA_ASSERT(allocator && pool);
    16085 
    16086  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16087 
    16088  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16089 
    16090  return allocator->CheckPoolCorruption(pool);
    16091 }
    16092 
    16093 VkResult vmaAllocateMemory(
    16094  VmaAllocator allocator,
    16095  const VkMemoryRequirements* pVkMemoryRequirements,
    16096  const VmaAllocationCreateInfo* pCreateInfo,
    16097  VmaAllocation* pAllocation,
    16098  VmaAllocationInfo* pAllocationInfo)
    16099 {
    16100  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16101 
    16102  VMA_DEBUG_LOG("vmaAllocateMemory");
    16103 
    16104  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16105 
    16106  VkResult result = allocator->AllocateMemory(
    16107  *pVkMemoryRequirements,
    16108  false, // requiresDedicatedAllocation
    16109  false, // prefersDedicatedAllocation
    16110  VK_NULL_HANDLE, // dedicatedBuffer
    16111  VK_NULL_HANDLE, // dedicatedImage
    16112  *pCreateInfo,
    16113  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16114  1, // allocationCount
    16115  pAllocation);
    16116 
    16117 #if VMA_RECORDING_ENABLED
    16118  if(allocator->GetRecorder() != VMA_NULL)
    16119  {
    16120  allocator->GetRecorder()->RecordAllocateMemory(
    16121  allocator->GetCurrentFrameIndex(),
    16122  *pVkMemoryRequirements,
    16123  *pCreateInfo,
    16124  *pAllocation);
    16125  }
    16126 #endif
    16127 
    16128  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16129  {
    16130  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16131  }
    16132 
    16133  return result;
    16134 }
    16135 
    16136 VkResult vmaAllocateMemoryPages(
    16137  VmaAllocator allocator,
    16138  const VkMemoryRequirements* pVkMemoryRequirements,
    16139  const VmaAllocationCreateInfo* pCreateInfo,
    16140  size_t allocationCount,
    16141  VmaAllocation* pAllocations,
    16142  VmaAllocationInfo* pAllocationInfo)
    16143 {
    16144  if(allocationCount == 0)
    16145  {
    16146  return VK_SUCCESS;
    16147  }
    16148 
    16149  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16150 
    16151  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16152 
    16153  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16154 
    16155  VkResult result = allocator->AllocateMemory(
    16156  *pVkMemoryRequirements,
    16157  false, // requiresDedicatedAllocation
    16158  false, // prefersDedicatedAllocation
    16159  VK_NULL_HANDLE, // dedicatedBuffer
    16160  VK_NULL_HANDLE, // dedicatedImage
    16161  *pCreateInfo,
    16162  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16163  allocationCount,
    16164  pAllocations);
    16165 
    16166 #if VMA_RECORDING_ENABLED
    16167  if(allocator->GetRecorder() != VMA_NULL)
    16168  {
    16169  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16170  allocator->GetCurrentFrameIndex(),
    16171  *pVkMemoryRequirements,
    16172  *pCreateInfo,
    16173  (uint64_t)allocationCount,
    16174  pAllocations);
    16175  }
    16176 #endif
    16177 
    16178  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16179  {
    16180  for(size_t i = 0; i < allocationCount; ++i)
    16181  {
    16182  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16183  }
    16184  }
    16185 
    16186  return result;
    16187 }
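// Example (a sketch): allocating several allocations with one call. All of them share
// the same VkMemoryRequirements and VmaAllocationCreateInfo; `memReq`, `createInfo`,
// and the count of 8 are assumptions of the example.
//
//   VmaAllocation allocs[8];
//   VmaAllocationInfo allocInfos[8];
//   VkResult res = vmaAllocateMemoryPages(
//       allocator, &memReq, &createInfo, 8, allocs, allocInfos);
//   // On failure, none of the allocations is left alive. Free them together:
//   vmaFreeMemoryPages(allocator, 8, allocs);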
    16188 
    16189 VkResult vmaAllocateMemoryForBuffer(
    16190  VmaAllocator allocator,
    16191  VkBuffer buffer,
    16192  const VmaAllocationCreateInfo* pCreateInfo,
    16193  VmaAllocation* pAllocation,
    16194  VmaAllocationInfo* pAllocationInfo)
    16195 {
    16196  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16197 
    16198  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16199 
    16200  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16201 
    16202  VkMemoryRequirements vkMemReq = {};
    16203  bool requiresDedicatedAllocation = false;
    16204  bool prefersDedicatedAllocation = false;
    16205  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16206  requiresDedicatedAllocation,
    16207  prefersDedicatedAllocation);
    16208 
    16209  VkResult result = allocator->AllocateMemory(
    16210  vkMemReq,
    16211  requiresDedicatedAllocation,
    16212  prefersDedicatedAllocation,
    16213  buffer, // dedicatedBuffer
    16214  VK_NULL_HANDLE, // dedicatedImage
    16215  *pCreateInfo,
    16216  VMA_SUBALLOCATION_TYPE_BUFFER,
    16217  1, // allocationCount
    16218  pAllocation);
    16219 
    16220 #if VMA_RECORDING_ENABLED
    16221  if(allocator->GetRecorder() != VMA_NULL)
    16222  {
    16223  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16224  allocator->GetCurrentFrameIndex(),
    16225  vkMemReq,
    16226  requiresDedicatedAllocation,
    16227  prefersDedicatedAllocation,
    16228  *pCreateInfo,
    16229  *pAllocation);
    16230  }
    16231 #endif
    16232 
    16233  if(pAllocationInfo && result == VK_SUCCESS)
    16234  {
    16235  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16236  }
    16237 
    16238  return result;
    16239 }
    16240 
    16241 VkResult vmaAllocateMemoryForImage(
    16242  VmaAllocator allocator,
    16243  VkImage image,
    16244  const VmaAllocationCreateInfo* pCreateInfo,
    16245  VmaAllocation* pAllocation,
    16246  VmaAllocationInfo* pAllocationInfo)
    16247 {
    16248  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16249 
    16250  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16251 
    16252  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16253 
    16254  VkMemoryRequirements vkMemReq = {};
    16255  bool requiresDedicatedAllocation = false;
    16256  bool prefersDedicatedAllocation = false;
    16257  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16258  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16259 
    16260  VkResult result = allocator->AllocateMemory(
    16261  vkMemReq,
    16262  requiresDedicatedAllocation,
    16263  prefersDedicatedAllocation,
    16264  VK_NULL_HANDLE, // dedicatedBuffer
    16265  image, // dedicatedImage
    16266  *pCreateInfo,
    16267  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16268  1, // allocationCount
    16269  pAllocation);
    16270 
    16271 #if VMA_RECORDING_ENABLED
    16272  if(allocator->GetRecorder() != VMA_NULL)
    16273  {
    16274  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16275  allocator->GetCurrentFrameIndex(),
    16276  vkMemReq,
    16277  requiresDedicatedAllocation,
    16278  prefersDedicatedAllocation,
    16279  *pCreateInfo,
    16280  *pAllocation);
    16281  }
    16282 #endif
    16283 
    16284  if(pAllocationInfo && result == VK_SUCCESS)
    16285  {
    16286  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16287  }
    16288 
    16289  return result;
    16290 }
    16291 
    16292 void vmaFreeMemory(
    16293  VmaAllocator allocator,
    16294  VmaAllocation allocation)
    16295 {
    16296  VMA_ASSERT(allocator);
    16297 
    16298  if(allocation == VK_NULL_HANDLE)
    16299  {
    16300  return;
    16301  }
    16302 
    16303  VMA_DEBUG_LOG("vmaFreeMemory");
    16304 
    16305  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16306 
    16307 #if VMA_RECORDING_ENABLED
    16308  if(allocator->GetRecorder() != VMA_NULL)
    16309  {
    16310  allocator->GetRecorder()->RecordFreeMemory(
    16311  allocator->GetCurrentFrameIndex(),
    16312  allocation);
    16313  }
    16314 #endif
    16315 
    16316  allocator->FreeMemory(
    16317  1, // allocationCount
    16318  &allocation);
    16319 }
    16320 
    16321 void vmaFreeMemoryPages(
    16322  VmaAllocator allocator,
    16323  size_t allocationCount,
    16324  VmaAllocation* pAllocations)
    16325 {
    16326  if(allocationCount == 0)
    16327  {
    16328  return;
    16329  }
    16330 
    16331  VMA_ASSERT(allocator);
    16332 
    16333  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16334 
    16335  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16336 
    16337 #if VMA_RECORDING_ENABLED
    16338  if(allocator->GetRecorder() != VMA_NULL)
    16339  {
    16340  allocator->GetRecorder()->RecordFreeMemoryPages(
    16341  allocator->GetCurrentFrameIndex(),
    16342  (uint64_t)allocationCount,
    16343  pAllocations);
    16344  }
    16345 #endif
    16346 
    16347  allocator->FreeMemory(allocationCount, pAllocations);
    16348 }
    16349 
    16350 VkResult vmaResizeAllocation(
    16351  VmaAllocator allocator,
    16352  VmaAllocation allocation,
    16353  VkDeviceSize newSize)
    16354 {
    16355  VMA_ASSERT(allocator && allocation);
    16356 
    16357  VMA_DEBUG_LOG("vmaResizeAllocation");
    16358 
    16359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16360 
    16361 #if VMA_RECORDING_ENABLED
    16362  if(allocator->GetRecorder() != VMA_NULL)
    16363  {
    16364  allocator->GetRecorder()->RecordResizeAllocation(
    16365  allocator->GetCurrentFrameIndex(),
    16366  allocation,
    16367  newSize);
    16368  }
    16369 #endif
    16370 
    16371  return allocator->ResizeAllocation(allocation, newSize);
    16372 }
    16373 
    16374 void vmaGetAllocationInfo(
    16375  VmaAllocator allocator,
    16376  VmaAllocation allocation,
    16377  VmaAllocationInfo* pAllocationInfo)
    16378 {
    16379  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16380 
    16381  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16382 
    16383 #if VMA_RECORDING_ENABLED
    16384  if(allocator->GetRecorder() != VMA_NULL)
    16385  {
    16386  allocator->GetRecorder()->RecordGetAllocationInfo(
    16387  allocator->GetCurrentFrameIndex(),
    16388  allocation);
    16389  }
    16390 #endif
    16391 
    16392  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16393 }
    16394 
    16395 VkBool32 vmaTouchAllocation(
    16396  VmaAllocator allocator,
    16397  VmaAllocation allocation)
    16398 {
    16399  VMA_ASSERT(allocator && allocation);
    16400 
    16401  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16402 
    16403 #if VMA_RECORDING_ENABLED
    16404  if(allocator->GetRecorder() != VMA_NULL)
    16405  {
    16406  allocator->GetRecorder()->RecordTouchAllocation(
    16407  allocator->GetCurrentFrameIndex(),
    16408  allocation);
    16409  }
    16410 #endif
    16411 
    16412  return allocator->TouchAllocation(allocation);
    16413 }
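// Example (a sketch): the per-frame pattern for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. The `frameIndex` variable and the
// surrounding render loop are assumptions of the example.
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex);
//   if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
//   {
//       // Allocation was lost - recreate the resource before using it this frame.
//   }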
    16414 
    16415 void vmaSetAllocationUserData(
    16416  VmaAllocator allocator,
    16417  VmaAllocation allocation,
    16418  void* pUserData)
    16419 {
    16420  VMA_ASSERT(allocator && allocation);
    16421 
    16422  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16423 
    16424  allocation->SetUserData(allocator, pUserData);
    16425 
    16426 #if VMA_RECORDING_ENABLED
    16427  if(allocator->GetRecorder() != VMA_NULL)
    16428  {
    16429  allocator->GetRecorder()->RecordSetAllocationUserData(
    16430  allocator->GetCurrentFrameIndex(),
    16431  allocation,
    16432  pUserData);
    16433  }
    16434 #endif
    16435 }
    16436 
    16437 void vmaCreateLostAllocation(
    16438  VmaAllocator allocator,
    16439  VmaAllocation* pAllocation)
    16440 {
    16441  VMA_ASSERT(allocator && pAllocation);
    16442 
    16443  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16444 
    16445  allocator->CreateLostAllocation(pAllocation);
    16446 
    16447 #if VMA_RECORDING_ENABLED
    16448  if(allocator->GetRecorder() != VMA_NULL)
    16449  {
    16450  allocator->GetRecorder()->RecordCreateLostAllocation(
    16451  allocator->GetCurrentFrameIndex(),
    16452  *pAllocation);
    16453  }
    16454 #endif
    16455 }
    16456 
    16457 VkResult vmaMapMemory(
    16458  VmaAllocator allocator,
    16459  VmaAllocation allocation,
    16460  void** ppData)
    16461 {
    16462  VMA_ASSERT(allocator && allocation && ppData);
    16463 
    16464  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16465 
    16466  VkResult res = allocator->Map(allocation, ppData);
    16467 
    16468 #if VMA_RECORDING_ENABLED
    16469  if(allocator->GetRecorder() != VMA_NULL)
    16470  {
    16471  allocator->GetRecorder()->RecordMapMemory(
    16472  allocator->GetCurrentFrameIndex(),
    16473  allocation);
    16474  }
    16475 #endif
    16476 
    16477  return res;
    16478 }
    16479 
    16480 void vmaUnmapMemory(
    16481  VmaAllocator allocator,
    16482  VmaAllocation allocation)
    16483 {
    16484  VMA_ASSERT(allocator && allocation);
    16485 
    16486  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16487 
    16488 #if VMA_RECORDING_ENABLED
    16489  if(allocator->GetRecorder() != VMA_NULL)
    16490  {
    16491  allocator->GetRecorder()->RecordUnmapMemory(
    16492  allocator->GetCurrentFrameIndex(),
    16493  allocation);
    16494  }
    16495 #endif
    16496 
    16497  allocator->Unmap(allocation);
    16498 }
    16499 
    16500 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16501 {
    16502  VMA_ASSERT(allocator && allocation);
    16503 
    16504  VMA_DEBUG_LOG("vmaFlushAllocation");
    16505 
    16506  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16507 
    16508  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16509 
    16510 #if VMA_RECORDING_ENABLED
    16511  if(allocator->GetRecorder() != VMA_NULL)
    16512  {
    16513  allocator->GetRecorder()->RecordFlushAllocation(
    16514  allocator->GetCurrentFrameIndex(),
    16515  allocation, offset, size);
    16516  }
    16517 #endif
    16518 }
    16519 
    16520 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16521 {
    16522  VMA_ASSERT(allocator && allocation);
    16523 
    16524  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16525 
    16526  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16527 
    16528  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16529 
    16530 #if VMA_RECORDING_ENABLED
    16531  if(allocator->GetRecorder() != VMA_NULL)
    16532  {
    16533  allocator->GetRecorder()->RecordInvalidateAllocation(
    16534  allocator->GetCurrentFrameIndex(),
    16535  allocation, offset, size);
    16536  }
    16537 #endif
    16538 }
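// Example (a sketch): writing to a non-coherent, HOST_VISIBLE allocation. `data` and
// `dataSize` are assumptions of the example. Note that offset and size are relative to
// the allocation, not to the VkDeviceMemory block, and VK_WHOLE_SIZE means "from offset
// to the end of the allocation".
//
//   void* mapped = VMA_NULL;
//   if(vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
//   {
//       memcpy(mapped, data, (size_t)dataSize);
//       vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
//       vmaUnmapMemory(allocator, allocation);
//   }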
    16539 
    16540 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16541 {
    16542  VMA_ASSERT(allocator);
    16543 
    16544  VMA_DEBUG_LOG("vmaCheckCorruption");
    16545 
    16546  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16547 
    16548  return allocator->CheckCorruption(memoryTypeBits);
    16549 }
    16550 
    16551 VkResult vmaDefragment(
    16552  VmaAllocator allocator,
    16553  VmaAllocation* pAllocations,
    16554  size_t allocationCount,
    16555  VkBool32* pAllocationsChanged,
    16556  const VmaDefragmentationInfo *pDefragmentationInfo,
    16557  VmaDefragmentationStats* pDefragmentationStats)
    16558 {
    16559  // Deprecated interface, reimplemented using the new one.
    16560 
    16561  VmaDefragmentationInfo2 info2 = {};
    16562  info2.allocationCount = (uint32_t)allocationCount;
    16563  info2.pAllocations = pAllocations;
    16564  info2.pAllocationsChanged = pAllocationsChanged;
    16565  if(pDefragmentationInfo != VMA_NULL)
    16566  {
    16567  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16568  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16569  }
    16570  else
    16571  {
    16572  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16573  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16574  }
    16575  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16576 
    16577  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    16578  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16579  if(res == VK_NOT_READY)
    16580  {
    16581  res = vmaDefragmentationEnd(allocator, ctx);
    16582  }
    16583  return res;
    16584 }
    16585 
    16586 VkResult vmaDefragmentationBegin(
    16587  VmaAllocator allocator,
    16588  const VmaDefragmentationInfo2* pInfo,
    16589  VmaDefragmentationStats* pStats,
    16590  VmaDefragmentationContext *pContext)
    16591 {
    16592  VMA_ASSERT(allocator && pInfo && pContext);
    16593 
    16594  // Degenerate case: Nothing to defragment.
    16595  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16596  {
    16597  return VK_SUCCESS;
    16598  }
    16599 
    16600  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16601  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16602  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16603  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16604 
    16605  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16606 
    16607  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16608 
    16609  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16610 
    16611 #if VMA_RECORDING_ENABLED
    16612  if(allocator->GetRecorder() != VMA_NULL)
    16613  {
    16614  allocator->GetRecorder()->RecordDefragmentationBegin(
    16615  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16616  }
    16617 #endif
    16618 
    16619  return res;
    16620 }
    16621 
    16622 VkResult vmaDefragmentationEnd(
    16623  VmaAllocator allocator,
    16624  VmaDefragmentationContext context)
    16625 {
    16626  VMA_ASSERT(allocator);
    16627 
    16628  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16629 
    16630  if(context != VK_NULL_HANDLE)
    16631  {
    16632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16633 
    16634 #if VMA_RECORDING_ENABLED
    16635  if(allocator->GetRecorder() != VMA_NULL)
    16636  {
    16637  allocator->GetRecorder()->RecordDefragmentationEnd(
    16638  allocator->GetCurrentFrameIndex(), context);
    16639  }
    16640 #endif
    16641 
    16642  return allocator->DefragmentationEnd(context);
    16643  }
    16644  else
    16645  {
    16646  return VK_SUCCESS;
    16647  }
    16648 }
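// Example (a sketch): CPU-side defragmentation of a set of allocations, equivalent to
// what the deprecated vmaDefragment above does. `allocs` and `allocCount` are
// assumptions of the example; commandBuffer is left VK_NULL_HANDLE, so only
// host-visible memory can be moved.
//
//   VmaDefragmentationInfo2 defragInfo = {};
//   defragInfo.allocationCount = allocCount;
//   defragInfo.pAllocations = allocs;
//   defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//   defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//   VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//   VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//   if(res == VK_NOT_READY)
//       res = vmaDefragmentationEnd(allocator, defragCtx); // Finish the synchronous pass.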
    16649 
    16650 VkResult vmaBindBufferMemory(
    16651  VmaAllocator allocator,
    16652  VmaAllocation allocation,
    16653  VkBuffer buffer)
    16654 {
    16655  VMA_ASSERT(allocator && allocation && buffer);
    16656 
    16657  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16658 
    16659  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16660 
    16661  return allocator->BindBufferMemory(allocation, buffer);
    16662 }
    16663 
    16664 VkResult vmaBindImageMemory(
    16665  VmaAllocator allocator,
    16666  VmaAllocation allocation,
    16667  VkImage image)
    16668 {
    16669  VMA_ASSERT(allocator && allocation && image);
    16670 
    16671  VMA_DEBUG_LOG("vmaBindImageMemory");
    16672 
    16673  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16674 
    16675  return allocator->BindImageMemory(allocation, image);
    16676 }
    16677 
    16678 VkResult vmaCreateBuffer(
    16679  VmaAllocator allocator,
    16680  const VkBufferCreateInfo* pBufferCreateInfo,
    16681  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16682  VkBuffer* pBuffer,
    16683  VmaAllocation* pAllocation,
    16684  VmaAllocationInfo* pAllocationInfo)
    16685 {
    16686  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16687 
    16688  if(pBufferCreateInfo->size == 0)
    16689  {
    16690  return VK_ERROR_VALIDATION_FAILED_EXT;
    16691  }
    16692 
    16693  VMA_DEBUG_LOG("vmaCreateBuffer");
    16694 
    16695  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16696 
    16697  *pBuffer = VK_NULL_HANDLE;
    16698  *pAllocation = VK_NULL_HANDLE;
    16699 
    16700  // 1. Create VkBuffer.
    16701  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16702  allocator->m_hDevice,
    16703  pBufferCreateInfo,
    16704  allocator->GetAllocationCallbacks(),
    16705  pBuffer);
    16706  if(res >= 0)
    16707  {
    16708  // 2. vkGetBufferMemoryRequirements.
    16709  VkMemoryRequirements vkMemReq = {};
    16710  bool requiresDedicatedAllocation = false;
    16711  bool prefersDedicatedAllocation = false;
    16712  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16713  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16714 
    16715  // Make sure alignment requirements for specific buffer usages reported
    16716  // in Physical Device Properties are included in alignment reported by memory requirements.
    16717  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16718  {
    16719  VMA_ASSERT(vkMemReq.alignment %
    16720  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16721  }
    16722  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16723  {
    16724  VMA_ASSERT(vkMemReq.alignment %
    16725  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16726  }
    16727  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16728  {
    16729  VMA_ASSERT(vkMemReq.alignment %
    16730  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16731  }
    16732 
    16733  // 3. Allocate memory using allocator.
    16734  res = allocator->AllocateMemory(
    16735  vkMemReq,
    16736  requiresDedicatedAllocation,
    16737  prefersDedicatedAllocation,
    16738  *pBuffer, // dedicatedBuffer
    16739  VK_NULL_HANDLE, // dedicatedImage
    16740  *pAllocationCreateInfo,
    16741  VMA_SUBALLOCATION_TYPE_BUFFER,
    16742  1, // allocationCount
    16743  pAllocation);
    16744 
    16745 #if VMA_RECORDING_ENABLED
    16746  if(allocator->GetRecorder() != VMA_NULL)
    16747  {
    16748  allocator->GetRecorder()->RecordCreateBuffer(
    16749  allocator->GetCurrentFrameIndex(),
    16750  *pBufferCreateInfo,
    16751  *pAllocationCreateInfo,
    16752  *pAllocation);
    16753  }
    16754 #endif
    16755 
    16756  if(res >= 0)
    16757  {
    16758  // 4. Bind buffer to memory.
    16759  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16760  {
    16761  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    16762  }
    16763  if(res >= 0)
    16764  {
    16765  // All steps succeeded.
    16766  #if VMA_STATS_STRING_ENABLED
    16767  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16768  #endif
    16769  if(pAllocationInfo != VMA_NULL)
    16770  {
    16771  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16772  }
    16773 
    16774  return VK_SUCCESS;
    16775  }
    16776  allocator->FreeMemory(
    16777  1, // allocationCount
    16778  pAllocation);
    16779  *pAllocation = VK_NULL_HANDLE;
    16780  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16781  *pBuffer = VK_NULL_HANDLE;
    16782  return res;
    16783  }
    16784  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16785  *pBuffer = VK_NULL_HANDLE;
    16786  return res;
    16787  }
    16788  return res;
    16789 }
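/*
A minimal vmaCreateBuffer/vmaDestroyBuffer sketch (assumes a valid `allocator`
and application-provided `myData`/`myDataSize`; error handling omitted).
Creates a persistently mapped, host-visible staging buffer:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = myDataSize;
    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

    // pMappedData is valid because of VMA_ALLOCATION_CREATE_MAPPED_BIT.
    memcpy(allocInfo.pMappedData, myData, myDataSize);

    vmaDestroyBuffer(allocator, buf, alloc);
*/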
    16790 
    16791 void vmaDestroyBuffer(
    16792  VmaAllocator allocator,
    16793  VkBuffer buffer,
    16794  VmaAllocation allocation)
    16795 {
    16796  VMA_ASSERT(allocator);
    16797 
    16798  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16799  {
    16800  return;
    16801  }
    16802 
    16803  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16804 
    16805  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16806 
    16807 #if VMA_RECORDING_ENABLED
    16808  if(allocator->GetRecorder() != VMA_NULL)
    16809  {
    16810  allocator->GetRecorder()->RecordDestroyBuffer(
    16811  allocator->GetCurrentFrameIndex(),
    16812  allocation);
    16813  }
    16814 #endif
    16815 
    16816  if(buffer != VK_NULL_HANDLE)
    16817  {
    16818  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16819  }
    16820 
    16821  if(allocation != VK_NULL_HANDLE)
    16822  {
    16823  allocator->FreeMemory(
    16824  1, // allocationCount
    16825  &allocation);
    16826  }
    16827 }
    16828 
    16829 VkResult vmaCreateImage(
    16830  VmaAllocator allocator,
    16831  const VkImageCreateInfo* pImageCreateInfo,
    16832  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16833  VkImage* pImage,
    16834  VmaAllocation* pAllocation,
    16835  VmaAllocationInfo* pAllocationInfo)
    16836 {
    16837  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16838 
    16839  if(pImageCreateInfo->extent.width == 0 ||
    16840  pImageCreateInfo->extent.height == 0 ||
    16841  pImageCreateInfo->extent.depth == 0 ||
    16842  pImageCreateInfo->mipLevels == 0 ||
    16843  pImageCreateInfo->arrayLayers == 0)
    16844  {
    16845  return VK_ERROR_VALIDATION_FAILED_EXT;
    16846  }
    16847 
    16848  VMA_DEBUG_LOG("vmaCreateImage");
    16849 
    16850  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16851 
    16852  *pImage = VK_NULL_HANDLE;
    16853  *pAllocation = VK_NULL_HANDLE;
    16854 
    16855  // 1. Create VkImage.
    16856  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16857  allocator->m_hDevice,
    16858  pImageCreateInfo,
    16859  allocator->GetAllocationCallbacks(),
    16860  pImage);
    16861  if(res >= 0)
    16862  {
    16863  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16864  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16865  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16866 
    16867  // 2. Allocate memory using allocator.
    16868  VkMemoryRequirements vkMemReq = {};
    16869  bool requiresDedicatedAllocation = false;
    16870  bool prefersDedicatedAllocation = false;
    16871  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16872  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16873 
    16874  res = allocator->AllocateMemory(
    16875  vkMemReq,
    16876  requiresDedicatedAllocation,
    16877  prefersDedicatedAllocation,
    16878  VK_NULL_HANDLE, // dedicatedBuffer
    16879  *pImage, // dedicatedImage
    16880  *pAllocationCreateInfo,
    16881  suballocType,
    16882  1, // allocationCount
    16883  pAllocation);
    16884 
    16885 #if VMA_RECORDING_ENABLED
    16886  if(allocator->GetRecorder() != VMA_NULL)
    16887  {
    16888  allocator->GetRecorder()->RecordCreateImage(
    16889  allocator->GetCurrentFrameIndex(),
    16890  *pImageCreateInfo,
    16891  *pAllocationCreateInfo,
    16892  *pAllocation);
    16893  }
    16894 #endif
    16895 
    16896  if(res >= 0)
    16897  {
    16898  // 3. Bind image to memory.
    16899  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16900  {
    16901  res = allocator->BindImageMemory(*pAllocation, *pImage);
    16902  }
    16903  if(res >= 0)
    16904  {
    16905  // All steps succeeded.
    16906  #if VMA_STATS_STRING_ENABLED
    16907  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    16908  #endif
    16909  if(pAllocationInfo != VMA_NULL)
    16910  {
    16911  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16912  }
    16913 
    16914  return VK_SUCCESS;
    16915  }
    16916  allocator->FreeMemory(
    16917  1, // allocationCount
    16918  pAllocation);
    16919  *pAllocation = VK_NULL_HANDLE;
    16920  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16921  *pImage = VK_NULL_HANDLE;
    16922  return res;
    16923  }
    16924  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16925  *pImage = VK_NULL_HANDLE;
    16926  return res;
    16927  }
    16928  return res;
    16929 }
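/*
A minimal vmaCreateImage/vmaDestroyImage sketch (assumes a valid `allocator`;
error handling omitted). Note that extent, mipLevels and arrayLayers must all
be non-zero, or the function returns VK_ERROR_VALIDATION_FAILED_EXT as above:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { 512, 512, 1 };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation alloc;
    vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &alloc, VMA_NULL);

    vmaDestroyImage(allocator, image, alloc);
*/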
    16930 
    16931 void vmaDestroyImage(
    16932  VmaAllocator allocator,
    16933  VkImage image,
    16934  VmaAllocation allocation)
    16935 {
    16936  VMA_ASSERT(allocator);
    16937 
    16938  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16939  {
    16940  return;
    16941  }
    16942 
    16943  VMA_DEBUG_LOG("vmaDestroyImage");
    16944 
    16945  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16946 
    16947 #if VMA_RECORDING_ENABLED
    16948  if(allocator->GetRecorder() != VMA_NULL)
    16949  {
    16950  allocator->GetRecorder()->RecordDestroyImage(
    16951  allocator->GetCurrentFrameIndex(),
    16952  allocation);
    16953  }
    16954 #endif
    16955 
    16956  if(image != VK_NULL_HANDLE)
    16957  {
    16958  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    16959  }
    16960  if(allocation != VK_NULL_HANDLE)
    16961  {
    16962  allocator->FreeMemory(
    16963  1, // allocationCount
    16964  &allocation);
    16965  }
    16966 }
    16967 
    16968 #endif // #ifdef VMA_IMPLEMENTATION
    +Go to the documentation of this file.
    1644 /*
    1645 Define this macro to 0/1 to disable/enable support for recording functionality,
    1646 available through VmaAllocatorCreateInfo::pRecordSettings.
    1647 */
    1648 #ifndef VMA_RECORDING_ENABLED
    1649  #ifdef _WIN32
    1650  #define VMA_RECORDING_ENABLED 1
    1651  #else
    1652  #define VMA_RECORDING_ENABLED 0
    1653  #endif
    1654 #endif
    1655 
    1656 #ifndef NOMINMAX
    1657  #define NOMINMAX // For windows.h
    1658 #endif
    1659 
    1660 #ifndef VULKAN_H_
    1661  #include <vulkan/vulkan.h>
    1662 #endif
    1663 
    1664 #if VMA_RECORDING_ENABLED
    1665  #include <windows.h>
    1666 #endif
    1667 
    1668 #if !defined(VMA_DEDICATED_ALLOCATION)
    1669  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
    1670  #define VMA_DEDICATED_ALLOCATION 1
    1671  #else
    1672  #define VMA_DEDICATED_ALLOCATION 0
    1673  #endif
    1674 #endif
    1675 
    1685 VK_DEFINE_HANDLE(VmaAllocator)
    1686 
    1687 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    1689  VmaAllocator allocator,
    1690  uint32_t memoryType,
    1691  VkDeviceMemory memory,
    1692  VkDeviceSize size);
    1694 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    1695  VmaAllocator allocator,
    1696  uint32_t memoryType,
    1697  VkDeviceMemory memory,
    1698  VkDeviceSize size);
    1699 
    1713 
    1743 
    1746 typedef VkFlags VmaAllocatorCreateFlags;
    1747 
    1752 typedef struct VmaVulkanFunctions {
    1753  PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    1754  PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    1755  PFN_vkAllocateMemory vkAllocateMemory;
    1756  PFN_vkFreeMemory vkFreeMemory;
    1757  PFN_vkMapMemory vkMapMemory;
    1758  PFN_vkUnmapMemory vkUnmapMemory;
    1759  PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    1760  PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    1761  PFN_vkBindBufferMemory vkBindBufferMemory;
    1762  PFN_vkBindImageMemory vkBindImageMemory;
    1763  PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    1764  PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    1765  PFN_vkCreateBuffer vkCreateBuffer;
    1766  PFN_vkDestroyBuffer vkDestroyBuffer;
    1767  PFN_vkCreateImage vkCreateImage;
    1768  PFN_vkDestroyImage vkDestroyImage;
    1769  PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    1770 #if VMA_DEDICATED_ALLOCATION
    1771  PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
    1772  PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
    1773 #endif
    1774 } VmaVulkanFunctions;
    1775 
    1777 typedef enum VmaRecordFlagBits {
    1783 } VmaRecordFlagBits;
    1784 
    1787 typedef VkFlags VmaRecordFlags;
    1788 
    1790 typedef struct VmaRecordSettings
    1791 {
    1801  const char* pFilePath;
    1802 } VmaRecordSettings;
    1803 
    1805 typedef struct VmaAllocatorCreateInfo
    1806 {
    1810 
    1811  VkPhysicalDevice physicalDevice;
    1813 
    1814  VkDevice device;
    1816 
    1819 
    1820  const VkAllocationCallbacks* pAllocationCallbacks;
    1822 
    1862  const VkDeviceSize* pHeapSizeLimit;
    1882 } VmaAllocatorCreateInfo;
    1883 
    1885 VkResult vmaCreateAllocator(
    1886  const VmaAllocatorCreateInfo* pCreateInfo,
    1887  VmaAllocator* pAllocator);
    1888 
    1890 void vmaDestroyAllocator(
    1891  VmaAllocator allocator);
    1892 
    1897 void vmaGetPhysicalDeviceProperties(
    1898  VmaAllocator allocator,
    1899  const VkPhysicalDeviceProperties** ppPhysicalDeviceProperties);
    1900 
    1905 void vmaGetMemoryProperties(
    1906  VmaAllocator allocator,
    1907  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties);
    1908 
    1915 void vmaGetMemoryTypeProperties(
    1916  VmaAllocator allocator,
    1917  uint32_t memoryTypeIndex,
    1918  VkMemoryPropertyFlags* pFlags);
    1919 
    1928 void vmaSetCurrentFrameIndex(
    1929  VmaAllocator allocator,
    1930  uint32_t frameIndex);
    1931 
    1934 typedef struct VmaStatInfo
    1935 {
    1937  uint32_t blockCount;
    1943  VkDeviceSize usedBytes;
    1945  VkDeviceSize unusedBytes;
    1948 } VmaStatInfo;
    1949 
    1951 typedef struct VmaStats
    1952 {
    1953  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    1954  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    1956 } VmaStats;
    1957 
    1959 void vmaCalculateStats(
    1960  VmaAllocator allocator,
    1961  VmaStats* pStats);
    1962 
    1963 #ifndef VMA_STATS_STRING_ENABLED
    1964 #define VMA_STATS_STRING_ENABLED 1
    1965 #endif
    1966 
    1967 #if VMA_STATS_STRING_ENABLED
    1968 
    1970 
    1972 void vmaBuildStatsString(
    1973  VmaAllocator allocator,
    1974  char** ppStatsString,
    1975  VkBool32 detailedMap);
    1976 
    1977 void vmaFreeStatsString(
    1978  VmaAllocator allocator,
    1979  char* pStatsString);
    1980 
    1981 #endif // #if VMA_STATS_STRING_ENABLED
    1982 
    1991 VK_DEFINE_HANDLE(VmaPool)
    1992 
    1993 typedef enum VmaMemoryUsage
    1994 {
    2043 } VmaMemoryUsage;
    2044 
    2054 
    2115 
    2131 
    2141 
    2148 
    2152 
    2153 typedef struct VmaAllocationCreateInfo
    2154 {
    2167  VkMemoryPropertyFlags requiredFlags;
    2172  VkMemoryPropertyFlags preferredFlags;
    2180  uint32_t memoryTypeBits;
    2193  void* pUserData;
    2194 } VmaAllocationCreateInfo;
    2195 
    2212 VkResult vmaFindMemoryTypeIndex(
    2213  VmaAllocator allocator,
    2214  uint32_t memoryTypeBits,
    2215  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2216  uint32_t* pMemoryTypeIndex);
    2217 
    2230 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    2231  VmaAllocator allocator,
    2232  const VkBufferCreateInfo* pBufferCreateInfo,
    2233  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2234  uint32_t* pMemoryTypeIndex);
    2235 
    2248 VkResult vmaFindMemoryTypeIndexForImageInfo(
    2249  VmaAllocator allocator,
    2250  const VkImageCreateInfo* pImageCreateInfo,
    2251  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    2252  uint32_t* pMemoryTypeIndex);
    2253 
    2274 
    2291 
    2302 
    2308 
    2311 typedef VkFlags VmaPoolCreateFlags;
    2312 
    2315 typedef struct VmaPoolCreateInfo {
    2330  VkDeviceSize blockSize;
    2358 } VmaPoolCreateInfo;
    2359 
    2362 typedef struct VmaPoolStats {
    2365  VkDeviceSize size;
    2368  VkDeviceSize unusedSize;
    2381  VkDeviceSize unusedRangeSizeMax;
    2384  size_t blockCount;
    2385 } VmaPoolStats;
    2386 
    2393 VkResult vmaCreatePool(
    2394  VmaAllocator allocator,
    2395  const VmaPoolCreateInfo* pCreateInfo,
    2396  VmaPool* pPool);
    2397 
    2400 void vmaDestroyPool(
    2401  VmaAllocator allocator,
    2402  VmaPool pool);
    2403 
    2410 void vmaGetPoolStats(
    2411  VmaAllocator allocator,
    2412  VmaPool pool,
    2413  VmaPoolStats* pPoolStats);
    2414 
    2421 void vmaMakePoolAllocationsLost(
    2422  VmaAllocator allocator,
    2423  VmaPool pool,
    2424  size_t* pLostAllocationCount);
    2425 
    2440 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool);
    2441 
    2466 VK_DEFINE_HANDLE(VmaAllocation)
    2467 
    2468 
    2470 typedef struct VmaAllocationInfo {
    2475  uint32_t memoryType;
    2484  VkDeviceMemory deviceMemory;
    2489  VkDeviceSize offset;
    2494  VkDeviceSize size;
    2508  void* pUserData;
    2509 } VmaAllocationInfo;
    2510 
    2521 VkResult vmaAllocateMemory(
    2522  VmaAllocator allocator,
    2523  const VkMemoryRequirements* pVkMemoryRequirements,
    2524  const VmaAllocationCreateInfo* pCreateInfo,
    2525  VmaAllocation* pAllocation,
    2526  VmaAllocationInfo* pAllocationInfo);
    2527 
    2547 VkResult vmaAllocateMemoryPages(
    2548  VmaAllocator allocator,
    2549  const VkMemoryRequirements* pVkMemoryRequirements,
    2550  const VmaAllocationCreateInfo* pCreateInfo,
    2551  size_t allocationCount,
    2552  VmaAllocation* pAllocations,
    2553  VmaAllocationInfo* pAllocationInfo);
    2554 
    2561 VkResult vmaAllocateMemoryForBuffer(
    2562  VmaAllocator allocator,
    2563  VkBuffer buffer,
    2564  const VmaAllocationCreateInfo* pCreateInfo,
    2565  VmaAllocation* pAllocation,
    2566  VmaAllocationInfo* pAllocationInfo);
    2567 
    2569 VkResult vmaAllocateMemoryForImage(
    2570  VmaAllocator allocator,
    2571  VkImage image,
    2572  const VmaAllocationCreateInfo* pCreateInfo,
    2573  VmaAllocation* pAllocation,
    2574  VmaAllocationInfo* pAllocationInfo);
    2575 
    2580 void vmaFreeMemory(
    2581  VmaAllocator allocator,
    2582  VmaAllocation allocation);
    2583 
    2594 void vmaFreeMemoryPages(
    2595  VmaAllocator allocator,
    2596  size_t allocationCount,
    2597  VmaAllocation* pAllocations);
    2598 
    2619 VkResult vmaResizeAllocation(
    2620  VmaAllocator allocator,
    2621  VmaAllocation allocation,
    2622  VkDeviceSize newSize);
    2623 
    2640 void vmaGetAllocationInfo(
    2641  VmaAllocator allocator,
    2642  VmaAllocation allocation,
    2643  VmaAllocationInfo* pAllocationInfo);
    2644 
    2659 VkBool32 vmaTouchAllocation(
    2660  VmaAllocator allocator,
    2661  VmaAllocation allocation);
    2662 
    2676 void vmaSetAllocationUserData(
    2677  VmaAllocator allocator,
    2678  VmaAllocation allocation,
    2679  void* pUserData);
    2680 
    2691 void vmaCreateLostAllocation(
    2692  VmaAllocator allocator,
    2693  VmaAllocation* pAllocation);
    2694 
    2729 VkResult vmaMapMemory(
    2730  VmaAllocator allocator,
    2731  VmaAllocation allocation,
    2732  void** ppData);
    2733 
    2738 void vmaUnmapMemory(
    2739  VmaAllocator allocator,
    2740  VmaAllocation allocation);
    2741 
    2758 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2759 
    2776 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    2777 
    2794 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits);
    2795 
    2802 VK_DEFINE_HANDLE(VmaDefragmentationContext)
    2803 
    2804 typedef enum VmaDefragmentationFlagBits {
    2807 } VmaDefragmentationFlagBits;
    2808 typedef VkFlags VmaDefragmentationFlags;
    2809 
    2814 typedef struct VmaDefragmentationInfo2 {
    2838  uint32_t poolCount;
    2859  VkDeviceSize maxCpuBytesToMove;
    2869  VkDeviceSize maxGpuBytesToMove;
    2883  VkCommandBuffer commandBuffer;
    2884 } VmaDefragmentationInfo2;
    2885 
    2890 typedef struct VmaDefragmentationInfo {
    2895  VkDeviceSize maxBytesToMove;
    2901 } VmaDefragmentationInfo;
    2902 
    2904 typedef struct VmaDefragmentationStats {
    2906  VkDeviceSize bytesMoved;
    2908  VkDeviceSize bytesFreed;
    2913 } VmaDefragmentationStats;
    2914 
    2941 VkResult vmaDefragmentationBegin(
    2942  VmaAllocator allocator,
    2943  const VmaDefragmentationInfo2* pInfo,
    2944  VmaDefragmentationStats* pStats,
    2945  VmaDefragmentationContext *pContext);
    2946 
    2952 VkResult vmaDefragmentationEnd(
    2953  VmaAllocator allocator,
    2954  VmaDefragmentationContext context);
    2955 
    2996 VkResult vmaDefragment(
    2997  VmaAllocator allocator,
    2998  VmaAllocation* pAllocations,
    2999  size_t allocationCount,
    3000  VkBool32* pAllocationsChanged,
    3001  const VmaDefragmentationInfo *pDefragmentationInfo,
    3002  VmaDefragmentationStats* pDefragmentationStats);
    3003 
    3016 VkResult vmaBindBufferMemory(
    3017  VmaAllocator allocator,
    3018  VmaAllocation allocation,
    3019  VkBuffer buffer);
    3020 
    3033 VkResult vmaBindImageMemory(
    3034  VmaAllocator allocator,
    3035  VmaAllocation allocation,
    3036  VkImage image);
    3037 
    3064 VkResult vmaCreateBuffer(
    3065  VmaAllocator allocator,
    3066  const VkBufferCreateInfo* pBufferCreateInfo,
    3067  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3068  VkBuffer* pBuffer,
    3069  VmaAllocation* pAllocation,
    3070  VmaAllocationInfo* pAllocationInfo);
    3071 
    3083 void vmaDestroyBuffer(
    3084  VmaAllocator allocator,
    3085  VkBuffer buffer,
    3086  VmaAllocation allocation);
    3087 
    3089 VkResult vmaCreateImage(
    3090  VmaAllocator allocator,
    3091  const VkImageCreateInfo* pImageCreateInfo,
    3092  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    3093  VkImage* pImage,
    3094  VmaAllocation* pAllocation,
    3095  VmaAllocationInfo* pAllocationInfo);
    3096 
    3108 void vmaDestroyImage(
    3109  VmaAllocator allocator,
    3110  VkImage image,
    3111  VmaAllocation allocation);
    3112 
    3113 #ifdef __cplusplus
    3114 }
    3115 #endif
    3116 
    3117 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
    3118 
    3119 // For Visual Studio IntelliSense.
    3120 #if defined(__cplusplus) && defined(__INTELLISENSE__)
    3121 #define VMA_IMPLEMENTATION
    3122 #endif
    3123 
    3124 #ifdef VMA_IMPLEMENTATION
    3125 #undef VMA_IMPLEMENTATION
    3126 
    3127 #include <cstdint>
    3128 #include <cstdlib>
    3129 #include <cstring>
    3130 
    3131 /*******************************************************************************
    3132 CONFIGURATION SECTION
    3133 
    3134 Define some of these macros before each #include of this header or change them
    3135 here if you need behavior other than the default in your environment.
    3136 */
    3137 
    3138 /*
    3139 Define this macro to 1 to make the library fetch pointers to Vulkan functions
    3140 internally, like:
    3141 
    3142  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    3143 
    3144 Define to 0 if you are going to provide your own pointers to Vulkan functions via
    3145 VmaAllocatorCreateInfo::pVulkanFunctions.
    3146 */
    3147 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    3148 #define VMA_STATIC_VULKAN_FUNCTIONS 1
    3149 #endif
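/*
Sketch of the dynamic alternative (application-side code, not part of this
header; `GetProc` stands for a hypothetical vkGetDeviceProcAddr-based loader):
compile with VMA_STATIC_VULKAN_FUNCTIONS defined to 0 and pass your own
pointers when creating the allocator:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)GetProc("vkAllocateMemory");
    // ... fill every remaining member of VmaVulkanFunctions the same way ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
*/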
    3150 
    3151 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
    3152 //#define VMA_USE_STL_CONTAINERS 1
    3153 
    3154 /* Set this macro to 1 to make the library include and use STL containers:
    3155 std::pair, std::vector, std::list, std::unordered_map.
    3156 
    3157 Set it to 0 or leave it undefined to make the library use its own implementation
    3158 of the containers.
    3159 */
    3160 #if VMA_USE_STL_CONTAINERS
    3161  #define VMA_USE_STL_VECTOR 1
    3162  #define VMA_USE_STL_UNORDERED_MAP 1
    3163  #define VMA_USE_STL_LIST 1
    3164 #endif
    3165 
    3166 #ifndef VMA_USE_STL_SHARED_MUTEX
    3167  // Compiler conforms to C++17.
    3168  #if __cplusplus >= 201703L
    3169  #define VMA_USE_STL_SHARED_MUTEX 1
    3170  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    3171  // Otherwise it's always 199711L, even though shared_mutex has been available since Visual Studio 2015 Update 2.
    3172  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    3173  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
    3174  #define VMA_USE_STL_SHARED_MUTEX 1
    3175  #else
    3176  #define VMA_USE_STL_SHARED_MUTEX 0
    3177  #endif
    3178 #endif
    3179 
    3180 #if VMA_USE_STL_VECTOR
    3181  #include <vector>
    3182 #endif
    3183 
    3184 #if VMA_USE_STL_UNORDERED_MAP
    3185  #include <unordered_map>
    3186 #endif
    3187 
    3188 #if VMA_USE_STL_LIST
    3189  #include <list>
    3190 #endif
    3191 
    3192 /*
    3193 The following headers are used in this CONFIGURATION section only, so feel free
    3194 to remove them if they are not needed.
    3195 */
    3196 #include <cassert> // for assert
    3197 #include <algorithm> // for min, max
    3198 #include <mutex>
    3199 #include <atomic> // for std::atomic
    3200 
    3201 #ifndef VMA_NULL
    3202  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    3203  #define VMA_NULL nullptr
    3204 #endif
    3205 
    3206 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
    3207 #include <cstdlib>
    3208 void *aligned_alloc(size_t alignment, size_t size)
    3209 {
    3210  // alignment must be >= sizeof(void*)
    3211  if(alignment < sizeof(void*))
    3212  {
    3213  alignment = sizeof(void*);
    3214  }
    3215 
    3216  return memalign(alignment, size);
    3217 }
    3218 #elif defined(__APPLE__) || defined(__ANDROID__)
    3219 #include <cstdlib>
    3220 void *aligned_alloc(size_t alignment, size_t size)
    3221 {
    3222  // alignment must be >= sizeof(void*)
    3223  if(alignment < sizeof(void*))
    3224  {
    3225  alignment = sizeof(void*);
    3226  }
    3227 
    3228  void *pointer;
    3229  if(posix_memalign(&pointer, alignment, size) == 0)
    3230  return pointer;
    3231  return VMA_NULL;
    3232 }
    3233 #endif
    3234 
    3235 // If your compiler is not compatible with C++11 and the definition of the
    3236 // aligned_alloc() function is missing, uncommenting the following line may help:
    3237 
    3238 //#include <malloc.h>
    3239 
    3240 // Normal assert to check for programmer's errors, especially in Debug configuration.
    3241 #ifndef VMA_ASSERT
    3242  #ifdef _DEBUG
    3243  #define VMA_ASSERT(expr) assert(expr)
    3244  #else
    3245  #define VMA_ASSERT(expr)
    3246  #endif
    3247 #endif
    3248 
    3249 // Assert that will be called very often, like inside data structures e.g. operator[].
    3250 // Making it non-empty can make the program slow.
    3251 #ifndef VMA_HEAVY_ASSERT
    3252  #ifdef _DEBUG
    3253  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    3254  #else
    3255  #define VMA_HEAVY_ASSERT(expr)
    3256  #endif
    3257 #endif
    3258 
    3259 #ifndef VMA_ALIGN_OF
    3260  #define VMA_ALIGN_OF(type) (__alignof(type))
    3261 #endif
    3262 
    3263 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
    3264  #if defined(_WIN32)
    3265  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (_aligned_malloc((size), (alignment)))
    3266  #else
    3267  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) (aligned_alloc((alignment), (size) ))
    3268  #endif
    3269 #endif
    3270 
    3271 #ifndef VMA_SYSTEM_FREE
    3272  #if defined(_WIN32)
    3273  #define VMA_SYSTEM_FREE(ptr) _aligned_free(ptr)
    3274  #else
    3275  #define VMA_SYSTEM_FREE(ptr) free(ptr)
    3276  #endif
    3277 #endif
    3278 
    3279 #ifndef VMA_MIN
    3280  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
    3281 #endif
    3282 
    3283 #ifndef VMA_MAX
    3284  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
    3285 #endif
    3286 
    3287 #ifndef VMA_SWAP
    3288  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
    3289 #endif
    3290 
    3291 #ifndef VMA_SORT
    3292  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
    3293 #endif
    3294 
    3295 #ifndef VMA_DEBUG_LOG
    3296  #define VMA_DEBUG_LOG(format, ...)
    3297  /*
    3298  #define VMA_DEBUG_LOG(format, ...) do { \
    3299  printf(format, __VA_ARGS__); \
    3300  printf("\n"); \
    3301  } while(false)
    3302  */
    3303 #endif
    3304 
    3305 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
    3306 #if VMA_STATS_STRING_ENABLED
    3307  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    3308  {
    3309  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    3310  }
    3311  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    3312  {
    3313  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    3314  }
    3315  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    3316  {
    3317  snprintf(outStr, strLen, "%p", ptr);
    3318  }
    3319 #endif
    3320 
    3321 #ifndef VMA_MUTEX
    3322  class VmaMutex
    3323  {
    3324  public:
    3325  void Lock() { m_Mutex.lock(); }
    3326  void Unlock() { m_Mutex.unlock(); }
    3327  private:
    3328  std::mutex m_Mutex;
    3329  };
    3330  #define VMA_MUTEX VmaMutex
    3331 #endif
    3332 
    3333 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
    3334 #ifndef VMA_RW_MUTEX
    3335  #if VMA_USE_STL_SHARED_MUTEX
    3336  // Use std::shared_mutex from C++17.
    3337  #include <shared_mutex>
    3338  class VmaRWMutex
    3339  {
    3340  public:
    3341  void LockRead() { m_Mutex.lock_shared(); }
    3342  void UnlockRead() { m_Mutex.unlock_shared(); }
    3343  void LockWrite() { m_Mutex.lock(); }
    3344  void UnlockWrite() { m_Mutex.unlock(); }
    3345  private:
    3346  std::shared_mutex m_Mutex;
    3347  };
    3348  #define VMA_RW_MUTEX VmaRWMutex
    3349  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
    3350  // Use SRWLOCK from WinAPI.
    3351  // Minimum supported client = Windows Vista, server = Windows Server 2008.
    3352  class VmaRWMutex
    3353  {
    3354  public:
    3355  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
    3356  void LockRead() { AcquireSRWLockShared(&m_Lock); }
    3357  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
    3358  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
    3359  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
    3360  private:
    3361  SRWLOCK m_Lock;
    3362  };
    3363  #define VMA_RW_MUTEX VmaRWMutex
    3364  #else
    3365  // Less efficient fallback: Use normal mutex.
    3366  class VmaRWMutex
    3367  {
    3368  public:
    3369  void LockRead() { m_Mutex.Lock(); }
    3370  void UnlockRead() { m_Mutex.Unlock(); }
    3371  void LockWrite() { m_Mutex.Lock(); }
    3372  void UnlockWrite() { m_Mutex.Unlock(); }
    3373  private:
    3374  VMA_MUTEX m_Mutex;
    3375  };
    3376  #define VMA_RW_MUTEX VmaRWMutex
    3377  #endif // #if VMA_USE_STL_SHARED_MUTEX
    3378 #endif // #ifndef VMA_RW_MUTEX
    3379 
    3380 /*
    3381 If providing your own implementation, you need to implement a subset of std::atomic:
    3382 
    3383 - Constructor(uint32_t desired)
    3384 - uint32_t load() const
    3385 - void store(uint32_t desired)
    3386 - bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
    3387 */
    3388 #ifndef VMA_ATOMIC_UINT32
    3389  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
    3390 #endif
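/*
A minimal sketch of a conforming replacement (hypothetical type; shown here
wrapping std::atomic only to illustrate the required interface):

    class MyAtomicU32
    {
    public:
        MyAtomicU32(uint32_t desired) : m_Value(desired) { }
        uint32_t load() const { return m_Value.load(); }
        void store(uint32_t desired) { m_Value.store(desired); }
        bool compare_exchange_weak(uint32_t& expected, uint32_t desired)
            { return m_Value.compare_exchange_weak(expected, desired); }
    private:
        std::atomic<uint32_t> m_Value;
    };
    // #define VMA_ATOMIC_UINT32 MyAtomicU32
*/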
    3391 
    3392 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    3393 
    3397  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
    3398 #endif
    3399 
    3400 #ifndef VMA_DEBUG_ALIGNMENT
    3401 
    3405  #define VMA_DEBUG_ALIGNMENT (1)
    3406 #endif
    3407 
    3408 #ifndef VMA_DEBUG_MARGIN
    3409 
    3413  #define VMA_DEBUG_MARGIN (0)
    3414 #endif
    3415 
    3416 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    3417 
    3421  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
    3422 #endif
    3423 
    3424 #ifndef VMA_DEBUG_DETECT_CORRUPTION
    3425 
    3430  #define VMA_DEBUG_DETECT_CORRUPTION (0)
    3431 #endif
    3432 
    3433 #ifndef VMA_DEBUG_GLOBAL_MUTEX
    3434 
    3438  #define VMA_DEBUG_GLOBAL_MUTEX (0)
    3439 #endif
    3440 
    3441 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    3442 
    3446  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
    3447 #endif
    3448 
    3449 #ifndef VMA_SMALL_HEAP_MAX_SIZE
    3450  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
    3452 #endif
    3453 
    3454 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    3455  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
    3457 #endif
    3458 
    3459 #ifndef VMA_CLASS_NO_COPY
    3460  #define VMA_CLASS_NO_COPY(className) \
    3461  private: \
    3462  className(const className&) = delete; \
    3463  className& operator=(const className&) = delete;
    3464 #endif
    3465 
    3466 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
    3467 
    3468 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
    3469 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
    3470 
    3471 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
    3472 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
    3473 
    3474 /*******************************************************************************
    3475 END OF CONFIGURATION
    3476 */
    3477 
    3478 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
    3479 
    3480 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    3481  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
    3482 
    3483 // Returns number of bits set to 1 in (v).
    3484 static inline uint32_t VmaCountBitsSet(uint32_t v)
    3485 {
    3486  uint32_t c = v - ((v >> 1) & 0x55555555);
    3487  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    3488  c = ((c >> 4) + c) & 0x0F0F0F0F;
    3489  c = ((c >> 8) + c) & 0x00FF00FF;
    3490  c = ((c >> 16) + c) & 0x0000FFFF;
    3491  return c;
    3492 }
    3493 
    3494 // Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
    3495 // Use types like uint32_t, uint64_t as T.
    3496 template <typename T>
    3497 static inline T VmaAlignUp(T val, T align)
    3498 {
    3499  return (val + align - 1) / align * align;
    3500 }
    3501 // Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
    3502 // Use types like uint32_t, uint64_t as T.
    3503 template <typename T>
    3504 static inline T VmaAlignDown(T val, T align)
    3505 {
    3506  return val / align * align;
    3507 }
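/*
Worked examples (the values follow directly from the formulas above):
VmaAlignUp(11, 8) == 16 and VmaAlignDown(11, 8) == 8. This pair is how an
(offset, size) range can be rounded out to a multiple of nonCoherentAtomSize
before flushing or invalidating mapped memory:

    const VkDeviceSize atom = 64; // e.g. nonCoherentAtomSize
    const VkDeviceSize start = VmaAlignDown<VkDeviceSize>(100, atom);    // == 64
    const VkDeviceSize end   = VmaAlignUp<VkDeviceSize>(100 + 30, atom); // == 192
*/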
    3508 
    3509 // Division with mathematical rounding to the nearest integer.
    3510 template <typename T>
    3511 static inline T VmaRoundDiv(T x, T y)
    3512 {
    3513  return (x + (y / (T)2)) / y;
    3514 }
    3515 
    3516 /*
    3517 Returns true if given number is a power of two.
    3518 T must be an unsigned integer, or a signed integer that is always nonnegative.
    3519 Returns true for 0.
    3520 */
    3521 template <typename T>
    3522 inline bool VmaIsPow2(T x)
    3523 {
    3524  return (x & (x-1)) == 0;
    3525 }
    3526 
    3527 // Returns the smallest power of 2 greater than or equal to v.
    3528 static inline uint32_t VmaNextPow2(uint32_t v)
    3529 {
    3530  v--;
    3531  v |= v >> 1;
    3532  v |= v >> 2;
    3533  v |= v >> 4;
    3534  v |= v >> 8;
    3535  v |= v >> 16;
    3536  v++;
    3537  return v;
    3538 }
    3539 static inline uint64_t VmaNextPow2(uint64_t v)
    3540 {
    3541  v--;
    3542  v |= v >> 1;
    3543  v |= v >> 2;
    3544  v |= v >> 4;
    3545  v |= v >> 8;
    3546  v |= v >> 16;
    3547  v |= v >> 32;
    3548  v++;
    3549  return v;
    3550 }
    3551 
    3552 // Returns the largest power of 2 less than or equal to v.
    3553 static inline uint32_t VmaPrevPow2(uint32_t v)
    3554 {
    3555  v |= v >> 1;
    3556  v |= v >> 2;
    3557  v |= v >> 4;
    3558  v |= v >> 8;
    3559  v |= v >> 16;
    3560  v = v ^ (v >> 1);
    3561  return v;
    3562 }
    3563 static inline uint64_t VmaPrevPow2(uint64_t v)
    3564 {
    3565  v |= v >> 1;
    3566  v |= v >> 2;
    3567  v |= v >> 4;
    3568  v |= v >> 8;
    3569  v |= v >> 16;
    3570  v |= v >> 32;
    3571  v = v ^ (v >> 1);
    3572  return v;
    3573 }
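/*
Example values (assuming the definitions above):

    const uint32_t next = VmaNextPow2(300u); // == 512
    const uint32_t prev = VmaPrevPow2(300u); // == 256
    // A value that is already a power of two is returned unchanged by both.
*/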
    3574 
    3575 static inline bool VmaStrIsEmpty(const char* pStr)
    3576 {
    3577  return pStr == VMA_NULL || *pStr == '\0';
    3578 }
    3579 
    3580 #if VMA_STATS_STRING_ENABLED
    3581 
    3582 static const char* VmaAlgorithmToStr(uint32_t algorithm)
    3583 {
    3584  switch(algorithm)
    3585  {
    3587  return "Linear";
    3589  return "Buddy";
    3590  case 0:
    3591  return "Default";
    3592  default:
    3593  VMA_ASSERT(0);
    3594  return "";
    3595  }
    3596 }
    3597 
    3598 #endif // #if VMA_STATS_STRING_ENABLED
    3599 
    3600 #ifndef VMA_SORT
    3601 
    3602 template<typename Iterator, typename Compare>
    3603 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
    3604 {
    3605  Iterator centerValue = end; --centerValue;
    3606  Iterator insertIndex = beg;
    3607  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    3608  {
    3609  if(cmp(*memTypeIndex, *centerValue))
    3610  {
    3611  if(insertIndex != memTypeIndex)
    3612  {
    3613  VMA_SWAP(*memTypeIndex, *insertIndex);
    3614  }
    3615  ++insertIndex;
    3616  }
    3617  }
    3618  if(insertIndex != centerValue)
    3619  {
    3620  VMA_SWAP(*insertIndex, *centerValue);
    3621  }
    3622  return insertIndex;
    3623 }
    3624 
    3625 template<typename Iterator, typename Compare>
    3626 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
    3627 {
    3628  if(beg < end)
    3629  {
    3630  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
    3631  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
    3632  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    3633  }
    3634 }
    3635 
    3636 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
    3637 
    3638 #endif // #ifndef VMA_SORT
    3639 
    3640 /*
    3641 Returns true if two memory blocks occupy overlapping pages.
    3642 ResourceA must be at a lower memory offset than ResourceB.
    3643 
    3644 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
    3645 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
    3646 */
    3647 static inline bool VmaBlocksOnSamePage(
    3648  VkDeviceSize resourceAOffset,
    3649  VkDeviceSize resourceASize,
    3650  VkDeviceSize resourceBOffset,
    3651  VkDeviceSize pageSize)
    3652 {
    3653  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    3654  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    3655  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    3656  VkDeviceSize resourceBStart = resourceBOffset;
    3657  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    3658  return resourceAEndPage == resourceBStartPage;
    3659 }
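/*
Worked example, assuming pageSize (bufferImageGranularity) = 1024: a resource
at offset 0 with size 1000 ends inside page 0, so:

    VmaBlocksOnSamePage(0, 1000, 1016, 1024); // true  - B starts on the same page
    VmaBlocksOnSamePage(0, 1000, 1024, 1024); // false - B starts on the next page
*/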
    3660 
    3661 enum VmaSuballocationType
    3662 {
    3663  VMA_SUBALLOCATION_TYPE_FREE = 0,
    3664  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    3665  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    3666  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    3667  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    3668  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    3669  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
    3670 };
    3671 
    3672 /*
    3673 Returns true if given suballocation types could conflict and must respect
    3674 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
    3675 or a linear image and the other is an optimal image. If a type is unknown, the
    3676 function behaves conservatively.
    3677 */
    3678 static inline bool VmaIsBufferImageGranularityConflict(
    3679  VmaSuballocationType suballocType1,
    3680  VmaSuballocationType suballocType2)
    3681 {
    3682  if(suballocType1 > suballocType2)
    3683  {
    3684  VMA_SWAP(suballocType1, suballocType2);
    3685  }
    3686 
    3687  switch(suballocType1)
    3688  {
    3689  case VMA_SUBALLOCATION_TYPE_FREE:
    3690  return false;
    3691  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
    3692  return true;
    3693  case VMA_SUBALLOCATION_TYPE_BUFFER:
    3694  return
    3695  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3696  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3697  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
    3698  return
    3699  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    3700  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
    3701  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3702  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
    3703  return
    3704  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    3705  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
    3706  return false;
    3707  default:
    3708  VMA_ASSERT(0);
    3709  return true;
    3710  }
    3711 }
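/*
Examples (assuming the definitions above):

    VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER,
        VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL); // true: buffer next to an optimal image
    VmaIsBufferImageGranularityConflict(
        VMA_SUBALLOCATION_TYPE_BUFFER,
        VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR);  // false: both are linearly laid out
*/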
    3712 
    3713 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
    3714 {
    3715  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    3716  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3717  for(size_t i = 0; i < numberCount; ++i, ++pDst)
    3718  {
    3719  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    3720  }
    3721 }
    3722 
    3723 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
    3724 {
    3725  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    3726  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    3727  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    3728  {
    3729  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
    3730  {
    3731  return false;
    3732  }
    3733  }
    3734  return true;
    3735 }
    3736 
    3737 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
    3738 struct VmaMutexLock
    3739 {
    3740  VMA_CLASS_NO_COPY(VmaMutexLock)
    3741 public:
    3742  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
    3743  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3744  { if(m_pMutex) { m_pMutex->Lock(); } }
    3745  ~VmaMutexLock()
    3746  { if(m_pMutex) { m_pMutex->Unlock(); } }
    3747 private:
    3748  VMA_MUTEX* m_pMutex;
    3749 };
    3750 
    3751 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
    3752 struct VmaMutexLockRead
    3753 {
    3754  VMA_CLASS_NO_COPY(VmaMutexLockRead)
    3755 public:
    3756  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
    3757  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3758  { if(m_pMutex) { m_pMutex->LockRead(); } }
    3759  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
    3760 private:
    3761  VMA_RW_MUTEX* m_pMutex;
    3762 };
    3763 
    3764 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
    3765 struct VmaMutexLockWrite
    3766 {
    3767  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
    3768 public:
    3769  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
    3770  m_pMutex(useMutex ? &mutex : VMA_NULL)
    3771  { if(m_pMutex) { m_pMutex->LockWrite(); } }
    3772  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
    3773 private:
    3774  VMA_RW_MUTEX* m_pMutex;
    3775 };
    3776 
    3777 #if VMA_DEBUG_GLOBAL_MUTEX
    3778  static VMA_MUTEX gDebugGlobalMutex;
    3779  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
    3780 #else
    3781  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
    3782 #endif
    3783 
    3784 // Minimum size of a free suballocation to register it in the free suballocation collection.
    3785 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
    3786 
    3787 /*
    3788 Performs binary search and returns an iterator to the first element that is
    3789 greater than or equal to (key), according to comparison (cmp).
    3790 
    3791 Cmp should return true if its first argument is less than its second argument.
    3792 
    3793 The returned value is the found element, if present in the collection, or the
    3794 place where a new element with value (key) should be inserted.
    3795 */
    3796 template <typename CmpLess, typename IterT, typename KeyT>
    3797 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, CmpLess cmp)
    3798 {
    3799  size_t down = 0, up = (end - beg);
    3800  while(down < up)
    3801  {
    3802  const size_t mid = (down + up) / 2;
    3803  if(cmp(*(beg+mid), key))
    3804  {
    3805  down = mid + 1;
    3806  }
    3807  else
    3808  {
    3809  up = mid;
    3810  }
    3811  }
    3812  return beg + down;
    3813 }
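/*
Usage sketch (hypothetical comparator; assumes the input range is sorted):

    struct CmpSizeLess
    {
        bool operator()(VkDeviceSize lhs, VkDeviceSize rhs) const { return lhs < rhs; }
    };

    VkDeviceSize sizes[] = { 8, 16, 32, 64 };
    // Returns a pointer to 32: the first element not less than 20, which is
    // also where 20 would be inserted to keep the array sorted.
    const VkDeviceSize* it =
        VmaBinaryFindFirstNotLess(sizes, sizes + 4, (VkDeviceSize)20, CmpSizeLess());
*/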
    3814 
    3815 /*
    3816 Returns true if all pointers in the array are non-null and unique.
    3817 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
    3818 T must be pointer type, e.g. VmaAllocation, VmaPool.
    3819 */
    3820 template<typename T>
    3821 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
    3822 {
    3823  for(uint32_t i = 0; i < count; ++i)
    3824  {
    3825  const T iPtr = arr[i];
    3826  if(iPtr == VMA_NULL)
    3827  {
    3828  return false;
    3829  }
    3830  for(uint32_t j = i + 1; j < count; ++j)
    3831  {
    3832  if(iPtr == arr[j])
    3833  {
    3834  return false;
    3835  }
    3836  }
    3837  }
    3838  return true;
    3839 }
    3840 
    3842 // Memory allocation
    3843 
    3844 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
    3845 {
    3846  if((pAllocationCallbacks != VMA_NULL) &&
    3847  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    3848  {
    3849  return (*pAllocationCallbacks->pfnAllocation)(
    3850  pAllocationCallbacks->pUserData,
    3851  size,
    3852  alignment,
    3853  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    3854  }
    3855  else
    3856  {
    3857  return VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    3858  }
    3859 }
    3860 
    3861 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
    3862 {
    3863  if((pAllocationCallbacks != VMA_NULL) &&
    3864  (pAllocationCallbacks->pfnFree != VMA_NULL))
    3865  {
    3866  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    3867  }
    3868  else
    3869  {
    3870  VMA_SYSTEM_FREE(ptr);
    3871  }
    3872 }
    3873 
    3874 template<typename T>
    3875 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
    3876 {
    3877  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
    3878 }
    3879 
    3880 template<typename T>
    3881 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
    3882 {
    3883  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
    3884 }
    3885 
    3886 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
    3887 
    3888 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
    3889 
    3890 template<typename T>
    3891 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
    3892 {
    3893  ptr->~T();
    3894  VmaFree(pAllocationCallbacks, ptr);
    3895 }
    3896 
    3897 template<typename T>
    3898 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
    3899 {
    3900  if(ptr != VMA_NULL)
    3901  {
    3902  for(size_t i = count; i--; )
    3903  {
    3904  ptr[i].~T();
    3905  }
    3906  VmaFree(pAllocationCallbacks, ptr);
    3907  }
    3908 }
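/*
Usage sketch for vma_new / vma_delete (assumes `pCallbacks` is a valid
const VkAllocationCallbacks* or VMA_NULL):

    VmaMutex* pMutex = vma_new(pCallbacks, VmaMutex); // allocate + construct
    pMutex->Lock();
    pMutex->Unlock();
    vma_delete(pCallbacks, pMutex);                   // destruct + free

Note that vma_new_array placement-constructs only the first element, which is
one reason the containers below require POD-like element types.
*/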
    3909 
    3910 // STL-compatible allocator.
    3911 template<typename T>
    3912 class VmaStlAllocator
    3913 {
    3914 public:
    3915  const VkAllocationCallbacks* const m_pCallbacks;
    3916  typedef T value_type;
    3917 
    3918  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    3919  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
    3920 
    3921  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    3922  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
    3923 
    3924  template<typename U>
    3925  bool operator==(const VmaStlAllocator<U>& rhs) const
    3926  {
    3927  return m_pCallbacks == rhs.m_pCallbacks;
    3928  }
    3929  template<typename U>
    3930  bool operator!=(const VmaStlAllocator<U>& rhs) const
    3931  {
    3932  return m_pCallbacks != rhs.m_pCallbacks;
    3933  }
    3934 
    3935  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    3936 };
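/*
Usage sketch: the allocator plugs into any STL-compatible container, routing
allocations through VmaMalloc/VmaFree (std::vector shown only for illustration;
the library's own VmaVector below is used the same way; requires <vector>):

    typedef VmaStlAllocator<uint32_t> MyAlloc;
    std::vector<uint32_t, MyAlloc> v = std::vector<uint32_t, MyAlloc>(MyAlloc(pCallbacks));
    v.push_back(42);
*/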
    3937 
    3938 #if VMA_USE_STL_VECTOR
    3939 
    3940 #define VmaVector std::vector
    3941 
    3942 template<typename T, typename allocatorT>
    3943 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
    3944 {
    3945  vec.insert(vec.begin() + index, item);
    3946 }
    3947 
    3948 template<typename T, typename allocatorT>
    3949 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
    3950 {
    3951  vec.erase(vec.begin() + index);
    3952 }
    3953 
    3954 #else // #if VMA_USE_STL_VECTOR
    3955 
    3956 /* Class with an interface compatible with a subset of std::vector.
    3957 T must be POD because constructors and destructors are not called; memcpy is
    3958 used to move these objects instead. */
    3959 template<typename T, typename AllocatorT>
    3960 class VmaVector
    3961 {
    3962 public:
    3963  typedef T value_type;
    3964 
    3965  VmaVector(const AllocatorT& allocator) :
    3966  m_Allocator(allocator),
    3967  m_pArray(VMA_NULL),
    3968  m_Count(0),
    3969  m_Capacity(0)
    3970  {
    3971  }
    3972 
    3973  VmaVector(size_t count, const AllocatorT& allocator) :
    3974  m_Allocator(allocator),
    3975  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
    3976  m_Count(count),
    3977  m_Capacity(count)
    3978  {
    3979  }
    3980 
    3981  VmaVector(const VmaVector<T, AllocatorT>& src) :
    3982  m_Allocator(src.m_Allocator),
    3983  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
    3984  m_Count(src.m_Count),
    3985  m_Capacity(src.m_Count)
    3986  {
    3987  if(m_Count != 0)
    3988  {
    3989  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
    3990  }
    3991  }
    3992 
    3993  ~VmaVector()
    3994  {
    3995  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    3996  }
    3997 
    3998  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
    3999  {
    4000  if(&rhs != this)
    4001  {
    4002  resize(rhs.m_Count);
    4003  if(m_Count != 0)
    4004  {
    4005  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
    4006  }
    4007  }
    4008  return *this;
    4009  }
    4010 
    4011  bool empty() const { return m_Count == 0; }
    4012  size_t size() const { return m_Count; }
    4013  T* data() { return m_pArray; }
    4014  const T* data() const { return m_pArray; }
    4015 
    4016  T& operator[](size_t index)
    4017  {
    4018  VMA_HEAVY_ASSERT(index < m_Count);
    4019  return m_pArray[index];
    4020  }
    4021  const T& operator[](size_t index) const
    4022  {
    4023  VMA_HEAVY_ASSERT(index < m_Count);
    4024  return m_pArray[index];
    4025  }
    4026 
    4027  T& front()
    4028  {
    4029  VMA_HEAVY_ASSERT(m_Count > 0);
    4030  return m_pArray[0];
    4031  }
    4032  const T& front() const
    4033  {
    4034  VMA_HEAVY_ASSERT(m_Count > 0);
    4035  return m_pArray[0];
    4036  }
    4037  T& back()
    4038  {
    4039  VMA_HEAVY_ASSERT(m_Count > 0);
    4040  return m_pArray[m_Count - 1];
    4041  }
    4042  const T& back() const
    4043  {
    4044  VMA_HEAVY_ASSERT(m_Count > 0);
    4045  return m_pArray[m_Count - 1];
    4046  }
    4047 
    4048  void reserve(size_t newCapacity, bool freeMemory = false)
    4049  {
    4050  newCapacity = VMA_MAX(newCapacity, m_Count);
    4051 
    4052  if((newCapacity < m_Capacity) && !freeMemory)
    4053  {
    4054  newCapacity = m_Capacity;
    4055  }
    4056 
    4057  if(newCapacity != m_Capacity)
    4058  {
    4059  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
    4060  if(m_Count != 0)
    4061  {
    4062  memcpy(newArray, m_pArray, m_Count * sizeof(T));
    4063  }
    4064  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4065  m_Capacity = newCapacity;
    4066  m_pArray = newArray;
    4067  }
    4068  }
    4069 
    4070  void resize(size_t newCount, bool freeMemory = false)
    4071  {
    4072  size_t newCapacity = m_Capacity;
    4073  if(newCount > m_Capacity)
    4074  {
    4075  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
    4076  }
    4077  else if(freeMemory)
    4078  {
    4079  newCapacity = newCount;
    4080  }
    4081 
    4082  if(newCapacity != m_Capacity)
    4083  {
    4084  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
    4085  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
    4086  if(elementsToCopy != 0)
    4087  {
    4088  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
    4089  }
    4090  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
    4091  m_Capacity = newCapacity;
    4092  m_pArray = newArray;
    4093  }
    4094 
    4095  m_Count = newCount;
    4096  }
    4097 
    4098  void clear(bool freeMemory = false)
    4099  {
    4100  resize(0, freeMemory);
    4101  }
    4102 
    4103  void insert(size_t index, const T& src)
    4104  {
    4105  VMA_HEAVY_ASSERT(index <= m_Count);
    4106  const size_t oldCount = size();
    4107  resize(oldCount + 1);
    4108  if(index < oldCount)
    4109  {
    4110  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
    4111  }
    4112  m_pArray[index] = src;
    4113  }
    4114 
    4115  void remove(size_t index)
    4116  {
    4117  VMA_HEAVY_ASSERT(index < m_Count);
    4118  const size_t oldCount = size();
    4119  if(index < oldCount - 1)
    4120  {
    4121  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
    4122  }
    4123  resize(oldCount - 1);
    4124  }
    4125 
    4126  void push_back(const T& src)
    4127  {
    4128  const size_t newIndex = size();
    4129  resize(newIndex + 1);
    4130  m_pArray[newIndex] = src;
    4131  }
    4132 
    4133  void pop_back()
    4134  {
    4135  VMA_HEAVY_ASSERT(m_Count > 0);
    4136  resize(size() - 1);
    4137  }
    4138 
    4139  void push_front(const T& src)
    4140  {
    4141  insert(0, src);
    4142  }
    4143 
    4144  void pop_front()
    4145  {
    4146  VMA_HEAVY_ASSERT(m_Count > 0);
    4147  remove(0);
    4148  }
    4149 
    4150  typedef T* iterator;
    4151 
    4152  iterator begin() { return m_pArray; }
    4153  iterator end() { return m_pArray + m_Count; }
    4154 
    4155 private:
    4156  AllocatorT m_Allocator;
    4157  T* m_pArray;
    4158  size_t m_Count;
    4159  size_t m_Capacity;
    4160 };
    4161 
    4162 template<typename T, typename allocatorT>
    4163 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
    4164 {
    4165  vec.insert(index, item);
    4166 }
    4167 
    4168 template<typename T, typename allocatorT>
    4169 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
    4170 {
    4171  vec.remove(index);
    4172 }
    4173 
    4174 #endif // #if VMA_USE_STL_VECTOR
    4175 
    4176 template<typename CmpLess, typename VectorT>
    4177 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
    4178 {
    4179  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4180  vector.data(),
    4181  vector.data() + vector.size(),
    4182  value,
    4183  CmpLess()) - vector.data();
    4184  VmaVectorInsert(vector, indexToInsert, value);
    4185  return indexToInsert;
    4186 }
    4187 
    4188 template<typename CmpLess, typename VectorT>
    4189 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
    4190 {
    4191  CmpLess comparator;
    4192  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
    4193  vector.begin(),
    4194  vector.end(),
    4195  value,
    4196  comparator);
    4197  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
    4198  {
    4199  size_t indexToRemove = it - vector.begin();
    4200  VmaVectorRemove(vector, indexToRemove);
    4201  return true;
    4202  }
    4203  return false;
    4204 }
    4205 
    4206 template<typename CmpLess, typename IterT, typename KeyT>
    4207 IterT VmaVectorFindSorted(const IterT& beg, const IterT& end, const KeyT& value)
    4208 {
    4209  CmpLess comparator;
    4210  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
    4211  beg, end, value, comparator);
    4212  if(it == end ||
    4213  (!comparator(*it, value) && !comparator(value, *it)))
    4214  {
    4215  return it;
    4216  }
    4217  return end;
    4218 }
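// Editorial sketch, not part of the original header: how the sorted-vector
// helpers above fit together. The type ExampleItem and comparator
// ExampleItemLess are hypothetical; any POD type with a strict-weak-ordering
// comparator works the same way.
#if 0 // illustration only
struct ExampleItem { uint32_t key; };
struct ExampleItemLess
{
    bool operator()(const ExampleItem& lhs, const ExampleItem& rhs) const
    {
        return lhs.key < rhs.key;
    }
};
static void ExampleSortedVectorUsage(const VkAllocationCallbacks* pCallbacks)
{
    VmaStlAllocator<ExampleItem> alloc(pCallbacks);
    VmaVector< ExampleItem, VmaStlAllocator<ExampleItem> > vec(alloc);
    ExampleItem item = { 42 };
    VmaVectorInsertSorted<ExampleItemLess>(vec, item); // binary search + insert keeps vec sorted
    VmaVectorRemoveSorted<ExampleItemLess>(vec, item); // returns true: an equal item was found
}
#endif // illustration only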
    4219 
    4220 ////////////////////////////////////////////////////////////////////////////////
    4221 // class VmaPoolAllocator
    4222 
    4223 /*
    4224 Allocator for objects of type T using a list of arrays (pools) to speed up
    4225 allocation. The number of elements that can be allocated is not bounded, because
    4226 the allocator can create multiple blocks.
    4227 */
    4228 template<typename T>
    4229 class VmaPoolAllocator
    4230 {
    4231  VMA_CLASS_NO_COPY(VmaPoolAllocator)
    4232 public:
    4233  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
    4234  ~VmaPoolAllocator();
    4235  void Clear();
    4236  T* Alloc();
    4237  void Free(T* ptr);
    4238 
    4239 private:
    4240  union Item
    4241  {
    4242  uint32_t NextFreeIndex;
    4243  T Value;
    4244  };
    4245 
    4246  struct ItemBlock
    4247  {
    4248  Item* pItems;
    4249  uint32_t Capacity;
    4250  uint32_t FirstFreeIndex;
    4251  };
    4252 
    4253  const VkAllocationCallbacks* m_pAllocationCallbacks;
    4254  const uint32_t m_FirstBlockCapacity;
    4255  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
    4256 
    4257  ItemBlock& CreateNewBlock();
    4258 };
    4259 
    4260 template<typename T>
    4261 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
    4262  m_pAllocationCallbacks(pAllocationCallbacks),
    4263  m_FirstBlockCapacity(firstBlockCapacity),
    4264  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
    4265 {
    4266  VMA_ASSERT(m_FirstBlockCapacity > 1);
    4267 }
    4268 
    4269 template<typename T>
    4270 VmaPoolAllocator<T>::~VmaPoolAllocator()
    4271 {
    4272  Clear();
    4273 }
    4274 
    4275 template<typename T>
    4276 void VmaPoolAllocator<T>::Clear()
    4277 {
    4278  for(size_t i = m_ItemBlocks.size(); i--; )
    4279  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
    4280  m_ItemBlocks.clear();
    4281 }
    4282 
    4283 template<typename T>
    4284 T* VmaPoolAllocator<T>::Alloc()
    4285 {
    4286  for(size_t i = m_ItemBlocks.size(); i--; )
    4287  {
    4288  ItemBlock& block = m_ItemBlocks[i];
    4289  // This block has some free items: use the first one.
    4290  if(block.FirstFreeIndex != UINT32_MAX)
    4291  {
    4292  Item* const pItem = &block.pItems[block.FirstFreeIndex];
    4293  block.FirstFreeIndex = pItem->NextFreeIndex;
    4294  return &pItem->Value;
    4295  }
    4296  }
    4297 
    4298  // No block has a free item: create a new one and use it.
    4299  ItemBlock& newBlock = CreateNewBlock();
    4300  Item* const pItem = &newBlock.pItems[0];
    4301  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
    4302  return &pItem->Value;
    4303 }
    4304 
    4305 template<typename T>
    4306 void VmaPoolAllocator<T>::Free(T* ptr)
    4307 {
    4308  // Search all memory blocks to find ptr.
    4309  for(size_t i = m_ItemBlocks.size(); i--; )
    4310  {
    4311  ItemBlock& block = m_ItemBlocks[i];
    4312 
    4313  // Convert ptr (T*) to a pointer to the enclosing union via memcpy, avoiding a type-punning cast.
    4314  Item* pItemPtr;
    4315  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
    4316 
    4317  // Check if pItemPtr is in address range of this block.
    4318  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
    4319  {
    4320  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
    4321  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
    4322  block.FirstFreeIndex = index;
    4323  return;
    4324  }
    4325  }
    4326  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
    4327 }
    4328 
    4329 template<typename T>
    4330 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
    4331 {
    4332  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
    4333  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
    4334 
    4335  const ItemBlock newBlock = {
    4336  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
    4337  newBlockCapacity,
    4338  0 };
    4339 
    4340  m_ItemBlocks.push_back(newBlock);
    4341 
    4342  // Setup singly-linked list of all free items in this block.
    4343  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
    4344  newBlock.pItems[i].NextFreeIndex = i + 1;
    4345  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
    4346  return m_ItemBlocks.back();
    4347 }
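// Editorial sketch, not part of the original header: a typical VmaPoolAllocator
// lifecycle. Each block threads a singly-linked free list through its unused
// Items, so Alloc() and Free() are O(1) apart from the linear block search in
// Free(). Block capacities grow geometrically (factor 3/2), as seen above.
#if 0 // illustration only
static void ExamplePoolAllocatorUsage(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<uint64_t> pool(pCallbacks, 32); // first block holds 32 items
    uint64_t* a = pool.Alloc(); // taken from a block's free list
    uint64_t* b = pool.Alloc(); // the 33rd allocation would create a block of capacity 32 * 3 / 2 = 48
    pool.Free(b);               // returns the item to its owning block's free list
    pool.Free(a);
}                               // ~VmaPoolAllocator calls Clear(), releasing all blocks
#endif // illustration only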
    4348 
    4349 ////////////////////////////////////////////////////////////////////////////////
    4350 // class VmaRawList, VmaList
    4351 
    4352 #if VMA_USE_STL_LIST
    4353 
    4354 #define VmaList std::list
    4355 
    4356 #else // #if VMA_USE_STL_LIST
    4357 
    4358 template<typename T>
    4359 struct VmaListItem
    4360 {
    4361  VmaListItem* pPrev;
    4362  VmaListItem* pNext;
    4363  T Value;
    4364 };
    4365 
    4366 // Doubly linked list.
    4367 template<typename T>
    4368 class VmaRawList
    4369 {
    4370  VMA_CLASS_NO_COPY(VmaRawList)
    4371 public:
    4372  typedef VmaListItem<T> ItemType;
    4373 
    4374  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
    4375  ~VmaRawList();
    4376  void Clear();
    4377 
    4378  size_t GetCount() const { return m_Count; }
    4379  bool IsEmpty() const { return m_Count == 0; }
    4380 
    4381  ItemType* Front() { return m_pFront; }
    4382  const ItemType* Front() const { return m_pFront; }
    4383  ItemType* Back() { return m_pBack; }
    4384  const ItemType* Back() const { return m_pBack; }
    4385 
    4386  ItemType* PushBack();
    4387  ItemType* PushFront();
    4388  ItemType* PushBack(const T& value);
    4389  ItemType* PushFront(const T& value);
    4390  void PopBack();
    4391  void PopFront();
    4392 
    4393  // pItem can be null - it means PushBack.
    4394  ItemType* InsertBefore(ItemType* pItem);
    4395  // pItem can be null - it means PushFront.
    4396  ItemType* InsertAfter(ItemType* pItem);
    4397 
    4398  ItemType* InsertBefore(ItemType* pItem, const T& value);
    4399  ItemType* InsertAfter(ItemType* pItem, const T& value);
    4400 
    4401  void Remove(ItemType* pItem);
    4402 
    4403 private:
    4404  const VkAllocationCallbacks* const m_pAllocationCallbacks;
    4405  VmaPoolAllocator<ItemType> m_ItemAllocator;
    4406  ItemType* m_pFront;
    4407  ItemType* m_pBack;
    4408  size_t m_Count;
    4409 };
    4410 
    4411 template<typename T>
    4412 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
    4413  m_pAllocationCallbacks(pAllocationCallbacks),
    4414  m_ItemAllocator(pAllocationCallbacks, 128),
    4415  m_pFront(VMA_NULL),
    4416  m_pBack(VMA_NULL),
    4417  m_Count(0)
    4418 {
    4419 }
    4420 
    4421 template<typename T>
    4422 VmaRawList<T>::~VmaRawList()
    4423 {
    4424  // Intentionally not calling Clear, because that would waste time returning
    4425  // all items to m_ItemAllocator as free - the allocator releases its blocks anyway.
    4426 }
    4427 
    4428 template<typename T>
    4429 void VmaRawList<T>::Clear()
    4430 {
    4431  if(IsEmpty() == false)
    4432  {
    4433  ItemType* pItem = m_pBack;
    4434  while(pItem != VMA_NULL)
    4435  {
    4436  ItemType* const pPrevItem = pItem->pPrev;
    4437  m_ItemAllocator.Free(pItem);
    4438  pItem = pPrevItem;
    4439  }
    4440  m_pFront = VMA_NULL;
    4441  m_pBack = VMA_NULL;
    4442  m_Count = 0;
    4443  }
    4444 }
    4445 
    4446 template<typename T>
    4447 VmaListItem<T>* VmaRawList<T>::PushBack()
    4448 {
    4449  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4450  pNewItem->pNext = VMA_NULL;
    4451  if(IsEmpty())
    4452  {
    4453  pNewItem->pPrev = VMA_NULL;
    4454  m_pFront = pNewItem;
    4455  m_pBack = pNewItem;
    4456  m_Count = 1;
    4457  }
    4458  else
    4459  {
    4460  pNewItem->pPrev = m_pBack;
    4461  m_pBack->pNext = pNewItem;
    4462  m_pBack = pNewItem;
    4463  ++m_Count;
    4464  }
    4465  return pNewItem;
    4466 }
    4467 
    4468 template<typename T>
    4469 VmaListItem<T>* VmaRawList<T>::PushFront()
    4470 {
    4471  ItemType* const pNewItem = m_ItemAllocator.Alloc();
    4472  pNewItem->pPrev = VMA_NULL;
    4473  if(IsEmpty())
    4474  {
    4475  pNewItem->pNext = VMA_NULL;
    4476  m_pFront = pNewItem;
    4477  m_pBack = pNewItem;
    4478  m_Count = 1;
    4479  }
    4480  else
    4481  {
    4482  pNewItem->pNext = m_pFront;
    4483  m_pFront->pPrev = pNewItem;
    4484  m_pFront = pNewItem;
    4485  ++m_Count;
    4486  }
    4487  return pNewItem;
    4488 }
    4489 
    4490 template<typename T>
    4491 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
    4492 {
    4493  ItemType* const pNewItem = PushBack();
    4494  pNewItem->Value = value;
    4495  return pNewItem;
    4496 }
    4497 
    4498 template<typename T>
    4499 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
    4500 {
    4501  ItemType* const pNewItem = PushFront();
    4502  pNewItem->Value = value;
    4503  return pNewItem;
    4504 }
    4505 
    4506 template<typename T>
    4507 void VmaRawList<T>::PopBack()
    4508 {
    4509  VMA_HEAVY_ASSERT(m_Count > 0);
    4510  ItemType* const pBackItem = m_pBack;
    4511  ItemType* const pPrevItem = pBackItem->pPrev;
    4512  if(pPrevItem != VMA_NULL)
    4513  {
    4514  pPrevItem->pNext = VMA_NULL;
    4515  }
    4516  m_pBack = pPrevItem;
    4517  m_ItemAllocator.Free(pBackItem);
    4518  --m_Count;
    4519 }
    4520 
    4521 template<typename T>
    4522 void VmaRawList<T>::PopFront()
    4523 {
    4524  VMA_HEAVY_ASSERT(m_Count > 0);
    4525  ItemType* const pFrontItem = m_pFront;
    4526  ItemType* const pNextItem = pFrontItem->pNext;
    4527  if(pNextItem != VMA_NULL)
    4528  {
    4529  pNextItem->pPrev = VMA_NULL;
    4530  }
    4531  m_pFront = pNextItem;
    4532  m_ItemAllocator.Free(pFrontItem);
    4533  --m_Count;
    4534 }
    4535 
    4536 template<typename T>
    4537 void VmaRawList<T>::Remove(ItemType* pItem)
    4538 {
    4539  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
    4540  VMA_HEAVY_ASSERT(m_Count > 0);
    4541 
    4542  if(pItem->pPrev != VMA_NULL)
    4543  {
    4544  pItem->pPrev->pNext = pItem->pNext;
    4545  }
    4546  else
    4547  {
    4548  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4549  m_pFront = pItem->pNext;
    4550  }
    4551 
    4552  if(pItem->pNext != VMA_NULL)
    4553  {
    4554  pItem->pNext->pPrev = pItem->pPrev;
    4555  }
    4556  else
    4557  {
    4558  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4559  m_pBack = pItem->pPrev;
    4560  }
    4561 
    4562  m_ItemAllocator.Free(pItem);
    4563  --m_Count;
    4564 }
    4565 
    4566 template<typename T>
    4567 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
    4568 {
    4569  if(pItem != VMA_NULL)
    4570  {
    4571  ItemType* const prevItem = pItem->pPrev;
    4572  ItemType* const newItem = m_ItemAllocator.Alloc();
    4573  newItem->pPrev = prevItem;
    4574  newItem->pNext = pItem;
    4575  pItem->pPrev = newItem;
    4576  if(prevItem != VMA_NULL)
    4577  {
    4578  prevItem->pNext = newItem;
    4579  }
    4580  else
    4581  {
    4582  VMA_HEAVY_ASSERT(m_pFront == pItem);
    4583  m_pFront = newItem;
    4584  }
    4585  ++m_Count;
    4586  return newItem;
    4587  }
    4588  else
    4589  return PushBack();
    4590 }
    4591 
    4592 template<typename T>
    4593 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
    4594 {
    4595  if(pItem != VMA_NULL)
    4596  {
    4597  ItemType* const nextItem = pItem->pNext;
    4598  ItemType* const newItem = m_ItemAllocator.Alloc();
    4599  newItem->pNext = nextItem;
    4600  newItem->pPrev = pItem;
    4601  pItem->pNext = newItem;
    4602  if(nextItem != VMA_NULL)
    4603  {
    4604  nextItem->pPrev = newItem;
    4605  }
    4606  else
    4607  {
    4608  VMA_HEAVY_ASSERT(m_pBack == pItem);
    4609  m_pBack = newItem;
    4610  }
    4611  ++m_Count;
    4612  return newItem;
    4613  }
    4614  else
    4615  return PushFront();
    4616 }
    4617 
    4618 template<typename T>
    4619 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
    4620 {
    4621  ItemType* const newItem = InsertBefore(pItem);
    4622  newItem->Value = value;
    4623  return newItem;
    4624 }
    4625 
    4626 template<typename T>
    4627 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
    4628 {
    4629  ItemType* const newItem = InsertAfter(pItem);
    4630  newItem->Value = value;
    4631  return newItem;
    4632 }
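// Editorial note, not part of the original header: as documented above,
// InsertBefore(VMA_NULL) degenerates to PushBack() and InsertAfter(VMA_NULL) to
// PushFront(), so callers can pass a null "position" to mean the end of the list.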
    4633 
    4634 template<typename T, typename AllocatorT>
    4635 class VmaList
    4636 {
    4637  VMA_CLASS_NO_COPY(VmaList)
    4638 public:
    4639  class iterator
    4640  {
    4641  public:
    4642  iterator() :
    4643  m_pList(VMA_NULL),
    4644  m_pItem(VMA_NULL)
    4645  {
    4646  }
    4647 
    4648  T& operator*() const
    4649  {
    4650  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4651  return m_pItem->Value;
    4652  }
    4653  T* operator->() const
    4654  {
    4655  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4656  return &m_pItem->Value;
    4657  }
    4658 
    4659  iterator& operator++()
    4660  {
    4661  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4662  m_pItem = m_pItem->pNext;
    4663  return *this;
    4664  }
    4665  iterator& operator--()
    4666  {
    4667  if(m_pItem != VMA_NULL)
    4668  {
    4669  m_pItem = m_pItem->pPrev;
    4670  }
    4671  else
    4672  {
    4673  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4674  m_pItem = m_pList->Back();
    4675  }
    4676  return *this;
    4677  }
    4678 
    4679  iterator operator++(int)
    4680  {
    4681  iterator result = *this;
    4682  ++*this;
    4683  return result;
    4684  }
    4685  iterator operator--(int)
    4686  {
    4687  iterator result = *this;
    4688  --*this;
    4689  return result;
    4690  }
    4691 
    4692  bool operator==(const iterator& rhs) const
    4693  {
    4694  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4695  return m_pItem == rhs.m_pItem;
    4696  }
    4697  bool operator!=(const iterator& rhs) const
    4698  {
    4699  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4700  return m_pItem != rhs.m_pItem;
    4701  }
    4702 
    4703  private:
    4704  VmaRawList<T>* m_pList;
    4705  VmaListItem<T>* m_pItem;
    4706 
    4707  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
    4708  m_pList(pList),
    4709  m_pItem(pItem)
    4710  {
    4711  }
    4712 
    4713  friend class VmaList<T, AllocatorT>;
    4714  };
    4715 
    4716  class const_iterator
    4717  {
    4718  public:
    4719  const_iterator() :
    4720  m_pList(VMA_NULL),
    4721  m_pItem(VMA_NULL)
    4722  {
    4723  }
    4724 
    4725  const_iterator(const iterator& src) :
    4726  m_pList(src.m_pList),
    4727  m_pItem(src.m_pItem)
    4728  {
    4729  }
    4730 
    4731  const T& operator*() const
    4732  {
    4733  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4734  return m_pItem->Value;
    4735  }
    4736  const T* operator->() const
    4737  {
    4738  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4739  return &m_pItem->Value;
    4740  }
    4741 
    4742  const_iterator& operator++()
    4743  {
    4744  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
    4745  m_pItem = m_pItem->pNext;
    4746  return *this;
    4747  }
    4748  const_iterator& operator--()
    4749  {
    4750  if(m_pItem != VMA_NULL)
    4751  {
    4752  m_pItem = m_pItem->pPrev;
    4753  }
    4754  else
    4755  {
    4756  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
    4757  m_pItem = m_pList->Back();
    4758  }
    4759  return *this;
    4760  }
    4761 
    4762  const_iterator operator++(int)
    4763  {
    4764  const_iterator result = *this;
    4765  ++*this;
    4766  return result;
    4767  }
    4768  const_iterator operator--(int)
    4769  {
    4770  const_iterator result = *this;
    4771  --*this;
    4772  return result;
    4773  }
    4774 
    4775  bool operator==(const const_iterator& rhs) const
    4776  {
    4777  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4778  return m_pItem == rhs.m_pItem;
    4779  }
    4780  bool operator!=(const const_iterator& rhs) const
    4781  {
    4782  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
    4783  return m_pItem != rhs.m_pItem;
    4784  }
    4785 
    4786  private:
    4787  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
    4788  m_pList(pList),
    4789  m_pItem(pItem)
    4790  {
    4791  }
    4792 
    4793  const VmaRawList<T>* m_pList;
    4794  const VmaListItem<T>* m_pItem;
    4795 
    4796  friend class VmaList<T, AllocatorT>;
    4797  };
    4798 
    4799  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
    4800 
    4801  bool empty() const { return m_RawList.IsEmpty(); }
    4802  size_t size() const { return m_RawList.GetCount(); }
    4803 
    4804  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
    4805  iterator end() { return iterator(&m_RawList, VMA_NULL); }
    4806 
    4807  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
    4808  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
    4809 
    4810  void clear() { m_RawList.Clear(); }
    4811  void push_back(const T& value) { m_RawList.PushBack(value); }
    4812  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
    4813  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
    4814 
    4815 private:
    4816  VmaRawList<T> m_RawList;
    4817 };
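// Editorial sketch, not part of the original header: VmaList exposes the small
// std::list-like subset used elsewhere in this file. VmaStlAllocator is the
// expected AllocatorT.
#if 0 // illustration only
static void ExampleListUsage(const VkAllocationCallbacks* pCallbacks)
{
    typedef VmaList< int, VmaStlAllocator<int> > IntList;
    VmaStlAllocator<int> alloc(pCallbacks);
    IntList list(alloc);
    list.push_back(1);
    list.push_back(2);
    list.insert(list.begin(), 0); // insert before begin() behaves like push_front
    for(IntList::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it visits 0, 1, 2 in order
    }
    list.erase(list.begin()); // O(1) unlink; the node returns to the internal VmaPoolAllocator
}
#endif // illustration only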
    4818 
    4819 #endif // #if VMA_USE_STL_LIST
    4820 
    4821 ////////////////////////////////////////////////////////////////////////////////
    4822 // class VmaMap
    4823 
    4824 // Unused in this version.
    4825 #if 0
    4826 
    4827 #if VMA_USE_STL_UNORDERED_MAP
    4828 
    4829 #define VmaPair std::pair
    4830 
    4831 #define VMA_MAP_TYPE(KeyT, ValueT) \
    4832  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
    4833 
    4834 #else // #if VMA_USE_STL_UNORDERED_MAP
    4835 
    4836 template<typename T1, typename T2>
    4837 struct VmaPair
    4838 {
    4839  T1 first;
    4840  T2 second;
    4841 
    4842  VmaPair() : first(), second() { }
    4843  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
    4844 };
    4845 
    4846 /* Class compatible with a subset of the interface of std::unordered_map.
    4847 KeyT, ValueT must be POD because they will be stored in VmaVector.
    4848 */
    4849 template<typename KeyT, typename ValueT>
    4850 class VmaMap
    4851 {
    4852 public:
    4853  typedef VmaPair<KeyT, ValueT> PairType;
    4854  typedef PairType* iterator;
    4855 
    4856  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
    4857 
    4858  iterator begin() { return m_Vector.begin(); }
    4859  iterator end() { return m_Vector.end(); }
    4860 
    4861  void insert(const PairType& pair);
    4862  iterator find(const KeyT& key);
    4863  void erase(iterator it);
    4864 
    4865 private:
    4866  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
    4867 };
    4868 
    4869 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
    4870 
    4871 template<typename FirstT, typename SecondT>
    4872 struct VmaPairFirstLess
    4873 {
    4874  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
    4875  {
    4876  return lhs.first < rhs.first;
    4877  }
    4878  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
    4879  {
    4880  return lhs.first < rhsFirst;
    4881  }
    4882 };
    4883 
    4884 template<typename KeyT, typename ValueT>
    4885 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
    4886 {
    4887  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
    4888  m_Vector.data(),
    4889  m_Vector.data() + m_Vector.size(),
    4890  pair,
    4891  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
    4892  VmaVectorInsert(m_Vector, indexToInsert, pair);
    4893 }
    4894 
    4895 template<typename KeyT, typename ValueT>
    4896 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
    4897 {
    4898  PairType* it = VmaBinaryFindFirstNotLess(
    4899  m_Vector.data(),
    4900  m_Vector.data() + m_Vector.size(),
    4901  key,
    4902  VmaPairFirstLess<KeyT, ValueT>());
    4903  if((it != m_Vector.end()) && (it->first == key))
    4904  {
    4905  return it;
    4906  }
    4907  else
    4908  {
    4909  return m_Vector.end();
    4910  }
    4911 }
    4912 
    4913 template<typename KeyT, typename ValueT>
    4914 void VmaMap<KeyT, ValueT>::erase(iterator it)
    4915 {
    4916  VmaVectorRemove(m_Vector, it - m_Vector.begin());
    4917 }
    4918 
    4919 #endif // #if VMA_USE_STL_UNORDERED_MAP
    4920 
    4921 #endif // #if 0
    4922 
    4924 
    4925 class VmaDeviceMemoryBlock;
    4926 
    4927 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
    4928 
    4929 struct VmaAllocation_T
    4930 {
    4931 private:
    4932  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
    4933 
    4934  enum FLAGS
    4935  {
    4936  FLAG_USER_DATA_STRING = 0x01,
    4937  };
    4938 
    4939 public:
    4940  enum ALLOCATION_TYPE
    4941  {
    4942  ALLOCATION_TYPE_NONE,
    4943  ALLOCATION_TYPE_BLOCK,
    4944  ALLOCATION_TYPE_DEDICATED,
    4945  };
    4946 
    4947  /*
    4948  This struct cannot have a constructor or destructor. It must be POD because it is
    4949  allocated using VmaPoolAllocator.
    4950  */
    4951 
    4952  void Ctor(uint32_t currentFrameIndex, bool userDataString)
    4953  {
    4954  m_Alignment = 1;
    4955  m_Size = 0;
    4956  m_pUserData = VMA_NULL;
    4957  m_LastUseFrameIndex = currentFrameIndex;
    4958  m_Type = (uint8_t)ALLOCATION_TYPE_NONE;
    4959  m_SuballocationType = (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN;
    4960  m_MapCount = 0;
    4961  m_Flags = userDataString ? (uint8_t)FLAG_USER_DATA_STRING : 0;
    4962 
    4963 #if VMA_STATS_STRING_ENABLED
    4964  m_CreationFrameIndex = currentFrameIndex;
    4965  m_BufferImageUsage = 0;
    4966 #endif
    4967  }
    4968 
    4969  void Dtor()
    4970  {
    4971  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
    4972 
    4973  // Check if owned string was freed.
    4974  VMA_ASSERT(m_pUserData == VMA_NULL);
    4975  }
    4976 
    4977  void InitBlockAllocation(
    4978  VmaDeviceMemoryBlock* block,
    4979  VkDeviceSize offset,
    4980  VkDeviceSize alignment,
    4981  VkDeviceSize size,
    4982  VmaSuballocationType suballocationType,
    4983  bool mapped,
    4984  bool canBecomeLost)
    4985  {
    4986  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    4987  VMA_ASSERT(block != VMA_NULL);
    4988  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    4989  m_Alignment = alignment;
    4990  m_Size = size;
    4991  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    4992  m_SuballocationType = (uint8_t)suballocationType;
    4993  m_BlockAllocation.m_Block = block;
    4994  m_BlockAllocation.m_Offset = offset;
    4995  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
    4996  }
    4997 
    4998  void InitLost()
    4999  {
    5000  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5001  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
    5002  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
    5003  m_BlockAllocation.m_Block = VMA_NULL;
    5004  m_BlockAllocation.m_Offset = 0;
    5005  m_BlockAllocation.m_CanBecomeLost = true;
    5006  }
    5007 
    5008  void ChangeBlockAllocation(
    5009  VmaAllocator hAllocator,
    5010  VmaDeviceMemoryBlock* block,
    5011  VkDeviceSize offset);
    5012 
    5013  void ChangeSize(VkDeviceSize newSize);
    5014  void ChangeOffset(VkDeviceSize newOffset);
    5015 
    5016  // A non-null pMappedData means the allocation was created with the MAPPED flag.
    5017  void InitDedicatedAllocation(
    5018  uint32_t memoryTypeIndex,
    5019  VkDeviceMemory hMemory,
    5020  VmaSuballocationType suballocationType,
    5021  void* pMappedData,
    5022  VkDeviceSize size)
    5023  {
    5024  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
    5025  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
    5026  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
    5027  m_Alignment = 0;
    5028  m_Size = size;
    5029  m_SuballocationType = (uint8_t)suballocationType;
    5030  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
    5031  m_DedicatedAllocation.m_MemoryTypeIndex = memoryTypeIndex;
    5032  m_DedicatedAllocation.m_hMemory = hMemory;
    5033  m_DedicatedAllocation.m_pMappedData = pMappedData;
    5034  }
    5035 
    5036  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
    5037  VkDeviceSize GetAlignment() const { return m_Alignment; }
    5038  VkDeviceSize GetSize() const { return m_Size; }
    5039  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
    5040  void* GetUserData() const { return m_pUserData; }
    5041  void SetUserData(VmaAllocator hAllocator, void* pUserData);
    5042  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
    5043 
    5044  VmaDeviceMemoryBlock* GetBlock() const
    5045  {
    5046  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    5047  return m_BlockAllocation.m_Block;
    5048  }
    5049  VkDeviceSize GetOffset() const;
    5050  VkDeviceMemory GetMemory() const;
    5051  uint32_t GetMemoryTypeIndex() const;
    5052  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
    5053  void* GetMappedData() const;
    5054  bool CanBecomeLost() const;
    5055 
    5056  uint32_t GetLastUseFrameIndex() const
    5057  {
    5058  return m_LastUseFrameIndex.load();
    5059  }
    5060  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
    5061  {
    5062  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
    5063  }
    5064  /*
    5065  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
    5066  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
    5067  - Else, returns false.
    5068 
    5069  If hAllocation is already lost, assert - you should not call this function then.
    5070  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
    5071  */
    5072  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5073 
    5074  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
    5075  {
    5076  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
    5077  outInfo.blockCount = 1;
    5078  outInfo.allocationCount = 1;
    5079  outInfo.unusedRangeCount = 0;
    5080  outInfo.usedBytes = m_Size;
    5081  outInfo.unusedBytes = 0;
    5082  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
    5083  outInfo.unusedRangeSizeMin = UINT64_MAX;
    5084  outInfo.unusedRangeSizeMax = 0;
    5085  }
    5086 
    5087  void BlockAllocMap();
    5088  void BlockAllocUnmap();
    5089  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
    5090  void DedicatedAllocUnmap(VmaAllocator hAllocator);
    5091 
    5092 #if VMA_STATS_STRING_ENABLED
    5093  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
    5094  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
    5095 
    5096  void InitBufferImageUsage(uint32_t bufferImageUsage)
    5097  {
    5098  VMA_ASSERT(m_BufferImageUsage == 0);
    5099  m_BufferImageUsage = bufferImageUsage;
    5100  }
    5101 
    5102  void PrintParameters(class VmaJsonWriter& json) const;
    5103 #endif
    5104 
    5105 private:
    5106  VkDeviceSize m_Alignment;
    5107  VkDeviceSize m_Size;
    5108  void* m_pUserData;
    5109  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
    5110  uint8_t m_Type; // ALLOCATION_TYPE
    5111  uint8_t m_SuballocationType; // VmaSuballocationType
    5112  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
    5113  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
    5114  uint8_t m_MapCount;
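 // (For example, m_MapCount == 0x82 would mean: persistently mapped, plus two outstanding vmaMapMemory() references.)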
    5115  uint8_t m_Flags; // enum FLAGS
    5116 
    5117  // Allocation out of VmaDeviceMemoryBlock.
    5118  struct BlockAllocation
    5119  {
    5120  VmaDeviceMemoryBlock* m_Block;
    5121  VkDeviceSize m_Offset;
    5122  bool m_CanBecomeLost;
    5123  };
    5124 
    5125  // Allocation for an object that has its own private VkDeviceMemory.
    5126  struct DedicatedAllocation
    5127  {
    5128  uint32_t m_MemoryTypeIndex;
    5129  VkDeviceMemory m_hMemory;
    5130  void* m_pMappedData; // Not null means memory is mapped.
    5131  };
    5132 
    5133  union
    5134  {
    5135  // Allocation out of VmaDeviceMemoryBlock.
    5136  BlockAllocation m_BlockAllocation;
    5137  // Allocation for an object that has its own private VkDeviceMemory.
    5138  DedicatedAllocation m_DedicatedAllocation;
    5139  };
    5140 
    5141 #if VMA_STATS_STRING_ENABLED
    5142  uint32_t m_CreationFrameIndex;
    5143  uint32_t m_BufferImageUsage; // 0 if unknown.
    5144 #endif
    5145 
    5146  void FreeUserDataString(VmaAllocator hAllocator);
    5147 };
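// Editorial sketch, not part of the original header: because VmaAllocation_T
// stays POD for the sake of VmaPoolAllocator, its lifetime is managed with
// explicit Ctor()/Dtor() calls instead of a real constructor/destructor.
// VmaAllocation is assumed here to be the handle typedef for VmaAllocation_T*.
#if 0 // illustration only
static VmaAllocation ExampleCreateAllocation(
    VmaPoolAllocator<VmaAllocation_T>& allocationObjectPool,
    uint32_t currentFrameIndex)
{
    VmaAllocation alloc = allocationObjectPool.Alloc(); // raw, uninitialized POD storage
    alloc->Ctor(currentFrameIndex, false /*userDataString*/);
    // ...use the allocation; later, tear down in reverse order:
    // alloc->Dtor();                    // asserts it was unmapped and user data was freed
    // allocationObjectPool.Free(alloc);
    return alloc;
}
#endif // illustration only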
    5148 
    5149 /*
    5150 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
    5151 allocated memory block or free.
    5152 */
    5153 struct VmaSuballocation
    5154 {
    5155  VkDeviceSize offset;
    5156  VkDeviceSize size;
    5157  VmaAllocation hAllocation;
    5158  VmaSuballocationType type;
    5159 };
    5160 
    5161 // Comparator for offsets.
    5162 struct VmaSuballocationOffsetLess
    5163 {
    5164  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5165  {
    5166  return lhs.offset < rhs.offset;
    5167  }
    5168 };
    5169 struct VmaSuballocationOffsetGreater
    5170 {
    5171  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
    5172  {
    5173  return lhs.offset > rhs.offset;
    5174  }
    5175 };
    5176 
    5177 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
    5178 
    5179 // Cost of making one additional allocation lost, expressed as an equivalent size in bytes.
    5180 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
    5181 
    5182 enum class VmaAllocationRequestType
    5183 {
    5184  Normal,
    5185  // Used by "Linear" algorithm.
    5186  UpperAddress,
    5187  EndOf1st,
    5188  EndOf2nd,
    5189 };
    5190 
    5191 /*
    5192 Parameters of a planned allocation inside a VmaDeviceMemoryBlock.
    5193 
    5194 If canMakeOtherLost was false:
    5195 - item points to a FREE suballocation.
    5196 - itemsToMakeLostCount is 0.
    5197 
    5198 If canMakeOtherLost was true:
    5199 - item points to the first of a sequence of suballocations, which are either FREE
    5200  or point to VmaAllocations that can become lost.
    5201 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
    5202  the requested allocation to succeed.
    5203 */
    5204 struct VmaAllocationRequest
    5205 {
    5206  VkDeviceSize offset;
    5207  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
    5208  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
    5209  VmaSuballocationList::iterator item;
    5210  size_t itemsToMakeLostCount;
    5211  void* customData;
    5212  VmaAllocationRequestType type;
    5213 
    5214  VkDeviceSize CalcCost() const
    5215  {
    5216  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
    5217  }
    5218 };
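// Editorial note, not part of the original header: CalcCost() lets competing
// allocation requests be ranked by the damage they would cause. With the
// hypothetical values sumItemSize = 300000 and itemsToMakeLostCount = 2:
//   cost = 300000 + 2 * VMA_LOST_ALLOCATION_COST = 300000 + 2097152 = 2397152,
// so a request that fits into plain free space (cost 0) always wins.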
    5219 
    5220 /*
    5221 Data structure used for bookkeeping of allocations and unused ranges of memory
    5222 in a single VkDeviceMemory block.
    5223 */
    5224 class VmaBlockMetadata
    5225 {
    5226 public:
    5227  VmaBlockMetadata(VmaAllocator hAllocator);
    5228  virtual ~VmaBlockMetadata() { }
    5229  virtual void Init(VkDeviceSize size) { m_Size = size; }
    5230 
    5231  // Validates all data structures inside this object. If not valid, returns false.
    5232  virtual bool Validate() const = 0;
    5233  VkDeviceSize GetSize() const { return m_Size; }
    5234  virtual size_t GetAllocationCount() const = 0;
    5235  virtual VkDeviceSize GetSumFreeSize() const = 0;
    5236  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
    5237  // Returns true if this block is empty - contains only a single free suballocation.
    5238  virtual bool IsEmpty() const = 0;
    5239 
    5240  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
    5241  // Shouldn't modify blockCount.
    5242  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
    5243 
    5244 #if VMA_STATS_STRING_ENABLED
    5245  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
    5246 #endif
    5247 
    5248  // Tries to find a place for a suballocation with the given parameters inside this block.
    5249  // On success, fills pAllocationRequest and returns true.
    5250  // On failure, returns false.
    5251  virtual bool CreateAllocationRequest(
    5252  uint32_t currentFrameIndex,
    5253  uint32_t frameInUseCount,
    5254  VkDeviceSize bufferImageGranularity,
    5255  VkDeviceSize allocSize,
    5256  VkDeviceSize allocAlignment,
    5257  bool upperAddress,
    5258  VmaSuballocationType allocType,
    5259  bool canMakeOtherLost,
    5260  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
    5261  uint32_t strategy,
    5262  VmaAllocationRequest* pAllocationRequest) = 0;
    5263 
    5264  virtual bool MakeRequestedAllocationsLost(
    5265  uint32_t currentFrameIndex,
    5266  uint32_t frameInUseCount,
    5267  VmaAllocationRequest* pAllocationRequest) = 0;
    5268 
    5269  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
    5270 
    5271  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
    5272 
    5273  // Makes the actual allocation based on the request, which must already be checked and valid.
    5274  virtual void Alloc(
    5275  const VmaAllocationRequest& request,
    5276  VmaSuballocationType type,
    5277  VkDeviceSize allocSize,
    5278  VmaAllocation hAllocation) = 0;
    5279 
    5280  // Frees suballocation assigned to given memory region.
    5281  virtual void Free(const VmaAllocation allocation) = 0;
    5282  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
    5283 
    5284  // Tries to resize (grow or shrink) space for given allocation, in place.
    5285  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize) { return false; }
    5286 
    5287 protected:
    5288  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
    5289 
    5290 #if VMA_STATS_STRING_ENABLED
    5291  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
    5292  VkDeviceSize unusedBytes,
    5293  size_t allocationCount,
    5294  size_t unusedRangeCount) const;
    5295  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    5296  VkDeviceSize offset,
    5297  VmaAllocation hAllocation) const;
    5298  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    5299  VkDeviceSize offset,
    5300  VkDeviceSize size) const;
    5301  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
    5302 #endif
    5303 
    5304 private:
    5305  VkDeviceSize m_Size;
    5306  const VkAllocationCallbacks* m_pAllocationCallbacks;
    5307 };
    5308 
    5309 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
    5310  VMA_ASSERT(0 && "Validation failed: " #cond); \
    5311  return false; \
    5312  } } while(false)
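// Editorial sketch, not part of the original header: VMA_VALIDATE is the
// building block of the Validate() implementations below. The do { } while(false)
// wrapper makes the macro behave like a single statement, so it stays safe inside
// an unbraced if/else. Typical shape of a Validate() body:
#if 0 // illustration only
static bool ExampleValidate(size_t allocationCount, size_t freeCount)
{
    VMA_VALIDATE(freeCount <= allocationCount); // on failure: asserts, then returns false
    VMA_VALIDATE(allocationCount < SIZE_MAX);
    return true; // all checks passed
}
#endif // illustration only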
    5313 
    5314 class VmaBlockMetadata_Generic : public VmaBlockMetadata
    5315 {
    5316  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
    5317 public:
    5318  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
    5319  virtual ~VmaBlockMetadata_Generic();
    5320  virtual void Init(VkDeviceSize size);
    5321 
    5322  virtual bool Validate() const;
    5323  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
    5324  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5325  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5326  virtual bool IsEmpty() const;
    5327 
    5328  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5329  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5330 
    5331 #if VMA_STATS_STRING_ENABLED
    5332  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5333 #endif
    5334 
    5335  virtual bool CreateAllocationRequest(
    5336  uint32_t currentFrameIndex,
    5337  uint32_t frameInUseCount,
    5338  VkDeviceSize bufferImageGranularity,
    5339  VkDeviceSize allocSize,
    5340  VkDeviceSize allocAlignment,
    5341  bool upperAddress,
    5342  VmaSuballocationType allocType,
    5343  bool canMakeOtherLost,
    5344  uint32_t strategy,
    5345  VmaAllocationRequest* pAllocationRequest);
    5346 
    5347  virtual bool MakeRequestedAllocationsLost(
    5348  uint32_t currentFrameIndex,
    5349  uint32_t frameInUseCount,
    5350  VmaAllocationRequest* pAllocationRequest);
    5351 
    5352  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5353 
    5354  virtual VkResult CheckCorruption(const void* pBlockData);
    5355 
    5356  virtual void Alloc(
    5357  const VmaAllocationRequest& request,
    5358  VmaSuballocationType type,
    5359  VkDeviceSize allocSize,
    5360  VmaAllocation hAllocation);
    5361 
    5362  virtual void Free(const VmaAllocation allocation);
    5363  virtual void FreeAtOffset(VkDeviceSize offset);
    5364 
    5365  virtual bool ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize);
    5366 
    5367  ////////////////////////////////////////////////////////////////////////////////
    5368  // For defragmentation
    5369 
    5370  bool IsBufferImageGranularityConflictPossible(
    5371  VkDeviceSize bufferImageGranularity,
    5372  VmaSuballocationType& inOutPrevSuballocType) const;
    5373 
    5374 private:
    5375  friend class VmaDefragmentationAlgorithm_Generic;
    5376  friend class VmaDefragmentationAlgorithm_Fast;
    5377 
    5378  uint32_t m_FreeCount;
    5379  VkDeviceSize m_SumFreeSize;
    5380  VmaSuballocationList m_Suballocations;
    5381  // Suballocations that are free and have size greater than certain threshold.
    5382  // Sorted by size, ascending.
    5383  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
    5384 
    5385  bool ValidateFreeSuballocationList() const;
    5386 
    5387  // Checks if a requested suballocation with the given parameters can be placed at given suballocItem.
    5388  // If yes, fills pOffset and returns true. If no, returns false.
    5389  bool CheckAllocation(
    5390  uint32_t currentFrameIndex,
    5391  uint32_t frameInUseCount,
    5392  VkDeviceSize bufferImageGranularity,
    5393  VkDeviceSize allocSize,
    5394  VkDeviceSize allocAlignment,
    5395  VmaSuballocationType allocType,
    5396  VmaSuballocationList::const_iterator suballocItem,
    5397  bool canMakeOtherLost,
    5398  VkDeviceSize* pOffset,
    5399  size_t* itemsToMakeLostCount,
    5400  VkDeviceSize* pSumFreeSize,
    5401  VkDeviceSize* pSumItemSize) const;
    5402  // Given a free suballocation, merges it with the following one, which must also be free.
    5403  void MergeFreeWithNext(VmaSuballocationList::iterator item);
    5404  // Releases the given suballocation, making it free.
    5405  // Merges it with adjacent free suballocations if applicable.
    5406  // Returns an iterator to the new free suballocation at this place.
    5407  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
    5408  // Given a free suballocation, inserts it into the sorted list
    5409  // m_FreeSuballocationsBySize if it qualifies.
    5410  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
    5411  // Given a free suballocation, removes it from the sorted list
    5412  // m_FreeSuballocationsBySize if it qualifies.
    5413  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
    5414 };
    5415 
    5416 /*
    5417 Allocations and their references in internal data structure look like this:
    5418 
    5419 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
    5420 
    5421  0 +-------+
    5422  | |
    5423  | |
    5424  | |
    5425  +-------+
    5426  | Alloc | 1st[m_1stNullItemsBeginCount]
    5427  +-------+
    5428  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5429  +-------+
    5430  | ... |
    5431  +-------+
    5432  | Alloc | 1st[1st.size() - 1]
    5433  +-------+
    5434  | |
    5435  | |
    5436  | |
    5437 GetSize() +-------+
    5438 
    5439 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
    5440 
    5441  0 +-------+
    5442  | Alloc | 2nd[0]
    5443  +-------+
    5444  | Alloc | 2nd[1]
    5445  +-------+
    5446  | ... |
    5447  +-------+
    5448  | Alloc | 2nd[2nd.size() - 1]
    5449  +-------+
    5450  | |
    5451  | |
    5452  | |
    5453  +-------+
    5454  | Alloc | 1st[m_1stNullItemsBeginCount]
    5455  +-------+
    5456  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5457  +-------+
    5458  | ... |
    5459  +-------+
    5460  | Alloc | 1st[1st.size() - 1]
    5461  +-------+
    5462  | |
    5463 GetSize() +-------+
    5464 
    5465 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
    5466 
    5467  0 +-------+
    5468  | |
    5469  | |
    5470  | |
    5471  +-------+
    5472  | Alloc | 1st[m_1stNullItemsBeginCount]
    5473  +-------+
    5474  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
    5475  +-------+
    5476  | ... |
    5477  +-------+
    5478  | Alloc | 1st[1st.size() - 1]
    5479  +-------+
    5480  | |
    5481  | |
    5482  | |
    5483  +-------+
    5484  | Alloc | 2nd[2nd.size() - 1]
    5485  +-------+
    5486  | ... |
    5487  +-------+
    5488  | Alloc | 2nd[1]
    5489  +-------+
    5490  | Alloc | 2nd[0]
    5491 GetSize() +-------+
    5492 
    5493 (See the editorial sketch after this class definition for how these modes behave.) */
    5494 class VmaBlockMetadata_Linear : public VmaBlockMetadata
    5495 {
    5496  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
    5497 public:
    5498  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
    5499  virtual ~VmaBlockMetadata_Linear();
    5500  virtual void Init(VkDeviceSize size);
    5501 
    5502  virtual bool Validate() const;
    5503  virtual size_t GetAllocationCount() const;
    5504  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
    5505  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5506  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
    5507 
    5508  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5509  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5510 
    5511 #if VMA_STATS_STRING_ENABLED
    5512  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5513 #endif
    5514 
    5515  virtual bool CreateAllocationRequest(
    5516  uint32_t currentFrameIndex,
    5517  uint32_t frameInUseCount,
    5518  VkDeviceSize bufferImageGranularity,
    5519  VkDeviceSize allocSize,
    5520  VkDeviceSize allocAlignment,
    5521  bool upperAddress,
    5522  VmaSuballocationType allocType,
    5523  bool canMakeOtherLost,
    5524  uint32_t strategy,
    5525  VmaAllocationRequest* pAllocationRequest);
    5526 
    5527  virtual bool MakeRequestedAllocationsLost(
    5528  uint32_t currentFrameIndex,
    5529  uint32_t frameInUseCount,
    5530  VmaAllocationRequest* pAllocationRequest);
    5531 
    5532  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5533 
    5534  virtual VkResult CheckCorruption(const void* pBlockData);
    5535 
    5536  virtual void Alloc(
    5537  const VmaAllocationRequest& request,
    5538  VmaSuballocationType type,
    5539  VkDeviceSize allocSize,
    5540  VmaAllocation hAllocation);
    5541 
    5542  virtual void Free(const VmaAllocation allocation);
    5543  virtual void FreeAtOffset(VkDeviceSize offset);
    5544 
    5545 private:
    5546  /*
    5547  There are two suballocation vectors, used in a ping-pong fashion.
    5548  The one with index m_1stVectorIndex is called 1st.
    5549  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
    5550  2nd can be non-empty only when 1st is not empty.
    5551  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
    5552  */
    5553  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
    5554 
    5555  enum SECOND_VECTOR_MODE
    5556  {
    5557  SECOND_VECTOR_EMPTY,
    5558  /*
    5559  Suballocations in 2nd vector are created later than the ones in 1st, but they
    5560  all have smaller offsets.
    5561  */
    5562  SECOND_VECTOR_RING_BUFFER,
    5563  /*
    5564  Suballocations in 2nd vector are the upper side of a double stack.
    5565  They all have offsets higher than those in 1st vector.
    5566  Top of this stack means smaller offsets, but higher indices in this vector.
    5567  */
    5568  SECOND_VECTOR_DOUBLE_STACK,
    5569  };
    5570 
    5571  VkDeviceSize m_SumFreeSize;
    5572  SuballocationVectorType m_Suballocations0, m_Suballocations1;
    5573  uint32_t m_1stVectorIndex;
    5574  SECOND_VECTOR_MODE m_2ndVectorMode;
    5575 
    5576  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5577  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5578  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
    5579  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
    5580 
    5581  // Number of items in 1st vector with hAllocation = null at the beginning.
    5582  size_t m_1stNullItemsBeginCount;
    5583  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
    5584  size_t m_1stNullItemsMiddleCount;
    5585  // Number of items in 2nd vector with hAllocation = null.
    5586  size_t m_2ndNullItemsCount;
    5587 
    5588  bool ShouldCompact1st() const;
    5589  void CleanupAfterFree();
    5590 
    5591  bool CreateAllocationRequest_LowerAddress(
    5592  uint32_t currentFrameIndex,
    5593  uint32_t frameInUseCount,
    5594  VkDeviceSize bufferImageGranularity,
    5595  VkDeviceSize allocSize,
    5596  VkDeviceSize allocAlignment,
    5597  VmaSuballocationType allocType,
    5598  bool canMakeOtherLost,
    5599  uint32_t strategy,
    5600  VmaAllocationRequest* pAllocationRequest);
    5601  bool CreateAllocationRequest_UpperAddress(
    5602  uint32_t currentFrameIndex,
    5603  uint32_t frameInUseCount,
    5604  VkDeviceSize bufferImageGranularity,
    5605  VkDeviceSize allocSize,
    5606  VkDeviceSize allocAlignment,
    5607  VmaSuballocationType allocType,
    5608  bool canMakeOtherLost,
    5609  uint32_t strategy,
    5610  VmaAllocationRequest* pAllocationRequest);
    5611 };
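// Editorial sketch, not part of the original header: how the two vectors evolve
// in SECOND_VECTOR_RING_BUFFER mode (offsets are hypothetical).
//
//   1st: [A@0][B@100][C@200]          2nd: empty
//   free(A)  -> A becomes a null item: m_1stNullItemsBeginCount = 1
//   alloc(D) -> no space left after C, but [0..100) is free again, so D is
//               appended to 2nd and the mode switches to RING_BUFFER:
//   1st: [null][B@100][C@200]         2nd: [D@0]
//
// When every item of 1st has been freed, the vectors swap roles via
// m_1stVectorIndex (the "ping-pong" described above) and 2nd becomes the new 1st.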
    5612 
    5613 /*
    5614 - GetSize() is the original size of the allocated memory block.
    5615 - m_UsableSize is this size aligned down to a power of two.
    5616  All allocations and calculations happen relative to m_UsableSize.
    5617 - GetUnusableSize() is the difference between them.
    5618  It is reported as a separate, unused range, not available for allocations.
    5619 
    5620 The node at level 0 has size = m_UsableSize.
    5621 Each successive level contains nodes half the size of the previous level.
    5622 m_LevelCount is the maximum number of levels to use in the current object.
    5623 (A worked example of this level math follows the class definition below.) */
    5624 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
    5625 {
    5626  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
    5627 public:
    5628  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
    5629  virtual ~VmaBlockMetadata_Buddy();
    5630  virtual void Init(VkDeviceSize size);
    5631 
    5632  virtual bool Validate() const;
    5633  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
    5634  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
    5635  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
    5636  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
    5637 
    5638  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
    5639  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
    5640 
    5641 #if VMA_STATS_STRING_ENABLED
    5642  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
    5643 #endif
    5644 
    5645  virtual bool CreateAllocationRequest(
    5646  uint32_t currentFrameIndex,
    5647  uint32_t frameInUseCount,
    5648  VkDeviceSize bufferImageGranularity,
    5649  VkDeviceSize allocSize,
    5650  VkDeviceSize allocAlignment,
    5651  bool upperAddress,
    5652  VmaSuballocationType allocType,
    5653  bool canMakeOtherLost,
    5654  uint32_t strategy,
    5655  VmaAllocationRequest* pAllocationRequest);
    5656 
    5657  virtual bool MakeRequestedAllocationsLost(
    5658  uint32_t currentFrameIndex,
    5659  uint32_t frameInUseCount,
    5660  VmaAllocationRequest* pAllocationRequest);
    5661 
    5662  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
    5663 
    5664  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
    5665 
    5666  virtual void Alloc(
    5667  const VmaAllocationRequest& request,
    5668  VmaSuballocationType type,
    5669  VkDeviceSize allocSize,
    5670  VmaAllocation hAllocation);
    5671 
    5672  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
    5673  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
    5674 
    5675 private:
    5676  static const VkDeviceSize MIN_NODE_SIZE = 32;
    5677  static const size_t MAX_LEVELS = 30;
    5678 
    5679  struct ValidationContext
    5680  {
    5681  size_t calculatedAllocationCount;
    5682  size_t calculatedFreeCount;
    5683  VkDeviceSize calculatedSumFreeSize;
    5684 
    5685  ValidationContext() :
    5686  calculatedAllocationCount(0),
    5687  calculatedFreeCount(0),
    5688  calculatedSumFreeSize(0) { }
    5689  };
    5690 
    5691  struct Node
    5692  {
    5693  VkDeviceSize offset;
    5694  enum TYPE
    5695  {
    5696  TYPE_FREE,
    5697  TYPE_ALLOCATION,
    5698  TYPE_SPLIT,
    5699  TYPE_COUNT
    5700  } type;
    5701  Node* parent;
    5702  Node* buddy;
    5703 
    5704  union
    5705  {
    5706  struct
    5707  {
    5708  Node* prev;
    5709  Node* next;
    5710  } free;
    5711  struct
    5712  {
    5713  VmaAllocation alloc;
    5714  } allocation;
    5715  struct
    5716  {
    5717  Node* leftChild;
    5718  } split;
    5719  };
    5720  };
    5721 
    5722  // Size of the memory block aligned down to a power of two.
    5723  VkDeviceSize m_UsableSize;
    5724  uint32_t m_LevelCount;
    5725 
    5726  Node* m_Root;
    5727  struct {
    5728  Node* front;
    5729  Node* back;
    5730  } m_FreeList[MAX_LEVELS];
    5731  // Number of nodes in the tree with type == TYPE_ALLOCATION.
    5732  size_t m_AllocationCount;
    5733  // Number of nodes in the tree with type == TYPE_FREE.
    5734  size_t m_FreeCount;
    5735  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
    5736  VkDeviceSize m_SumFreeSize;
    5737 
    5738  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
    5739  void DeleteNode(Node* node);
    5740  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
    5741  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
    5742  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
    5743  // Alloc passed just for validation. Can be null.
    5744  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
    5745  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
    5746  // Adds node to the front of FreeList at given level.
    5747  // node->type must be FREE.
    5748  // node->free.prev, next can be undefined.
    5749  void AddToFreeListFront(uint32_t level, Node* node);
    5750  // Removes node from FreeList at given level.
    5751  // node->type must be FREE.
    5752  // node->free.prev, next stay untouched.
    5753  void RemoveFromFreeList(uint32_t level, Node* node);
    5754 
    5755 #if VMA_STATS_STRING_ENABLED
    5756  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
    5757 #endif
    5758 };
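// Editorial note, not part of the original header: a worked example of the
// level math above, with a hypothetical block size of 1000 bytes:
//   m_UsableSize       = 512 (1000 aligned down to a power of two)
//   GetUnusableSize()  = 1000 - 512 = 488, reported as a permanently unused range
//   LevelToNodeSize(0) = 512, LevelToNodeSize(1) = 256, LevelToNodeSize(2) = 128, ...
// A 100-byte allocation is served from the deepest level whose nodes still fit
// it - 128 bytes at level 2 - wasting 28 bytes to internal fragmentation.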
    5759 
    5760 /*
    5761 Represents a single block of device memory (`VkDeviceMemory`) with all the
    5762 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
    5763 
    5764 Thread-safety: This class must be externally synchronized.
    5765 */
    5766 class VmaDeviceMemoryBlock
    5767 {
    5768  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
    5769 public:
    5770  VmaBlockMetadata* m_pMetadata;
    5771 
    5772  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
    5773 
    5774  ~VmaDeviceMemoryBlock()
    5775  {
    5776  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
    5777  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    5778  }
    5779 
    5780  // Always call after construction.
    5781  void Init(
    5782  VmaAllocator hAllocator,
    5783  VmaPool hParentPool,
    5784  uint32_t newMemoryTypeIndex,
    5785  VkDeviceMemory newMemory,
    5786  VkDeviceSize newSize,
    5787  uint32_t id,
    5788  uint32_t algorithm);
    5789  // Always call before destruction.
    5790  void Destroy(VmaAllocator allocator);
    5791 
    5792  VmaPool GetParentPool() const { return m_hParentPool; }
    5793  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
    5794  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5795  uint32_t GetId() const { return m_Id; }
    5796  void* GetMappedData() const { return m_pMappedData; }
    5797 
    5798  // Validates all data structures inside this object. If not valid, returns false.
    5799  bool Validate() const;
    5800 
    5801  VkResult CheckCorruption(VmaAllocator hAllocator);
    5802 
    5803  // ppData can be null.
    5804  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
    5805  void Unmap(VmaAllocator hAllocator, uint32_t count);
    5806 
    5807  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5808  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
    5809 
    5810  VkResult BindBufferMemory(
    5811  const VmaAllocator hAllocator,
    5812  const VmaAllocation hAllocation,
    5813  VkBuffer hBuffer);
    5814  VkResult BindImageMemory(
    5815  const VmaAllocator hAllocator,
    5816  const VmaAllocation hAllocation,
    5817  VkImage hImage);
    5818 
    5819 private:
    5820  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block doesn't belong to a custom pool.
    5821  uint32_t m_MemoryTypeIndex;
    5822  uint32_t m_Id;
    5823  VkDeviceMemory m_hMemory;
    5824 
    5825  /*
    5826  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
    5827  Also protects m_MapCount, m_pMappedData.
    5828  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
    5829  */
    5830  VMA_MUTEX m_Mutex;
    5831  uint32_t m_MapCount;
    5832  void* m_pMappedData;
    5833 };
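/*
Hedged usage sketch (not from the original source): Map()/Unmap() keep a
reference count under m_Mutex, so multiple callers can share a single
vkMapMemory of the same VkDeviceMemory.
*/
static void VmaBlockMapUsageSketch(VmaAllocator hAllocator, VmaDeviceMemoryBlock* pBlock)
{
    void* pData = VMA_NULL;
    // The first Map() performs the actual vkMapMemory; subsequent calls only
    // bump the counter and return the cached pointer.
    if(pBlock->Map(hAllocator, 1, &pData) == VK_SUCCESS)
    {
        // ... read or write through pData here ...
        // Unmap() decrements the counter and calls vkUnmapMemory only when it
        // reaches zero.
        pBlock->Unmap(hAllocator, 1);
    }
}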
    5834 
    5835 struct VmaPointerLess
    5836 {
    5837  bool operator()(const void* lhs, const void* rhs) const
    5838  {
    5839  return lhs < rhs;
    5840  }
    5841 };
    5842 
    5843 struct VmaDefragmentationMove
    5844 {
    5845  size_t srcBlockIndex;
    5846  size_t dstBlockIndex;
    5847  VkDeviceSize srcOffset;
    5848  VkDeviceSize dstOffset;
    5849  VkDeviceSize size;
    5850 };
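/*
Sketch (assumption, not from the original source) of what one move record
means: copy `size` bytes from block `srcBlockIndex` at `srcOffset` to block
`dstBlockIndex` at `dstOffset`. On the CPU path this boils down to a memmove
between the two mapped blocks (memmove rather than memcpy, because source and
destination may be the same block and the regions may overlap):
*/
static void VmaApplyDefragmentationMoveCpuSketch(
    char* pDstBlockMappedData,
    const char* pSrcBlockMappedData,
    const VmaDefragmentationMove& move)
{
    memmove(pDstBlockMappedData + move.dstOffset, pSrcBlockMappedData + move.srcOffset, (size_t)move.size);
}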
    5851 
    5852 class VmaDefragmentationAlgorithm;
    5853 
    5854 /*
    5855 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
    5856 Vulkan memory type.
    5857 
    5858 Synchronized internally with a mutex.
    5859 */
    5860 struct VmaBlockVector
    5861 {
    5862  VMA_CLASS_NO_COPY(VmaBlockVector)
    5863 public:
    5864  VmaBlockVector(
    5865  VmaAllocator hAllocator,
    5866  VmaPool hParentPool,
    5867  uint32_t memoryTypeIndex,
    5868  VkDeviceSize preferredBlockSize,
    5869  size_t minBlockCount,
    5870  size_t maxBlockCount,
    5871  VkDeviceSize bufferImageGranularity,
    5872  uint32_t frameInUseCount,
    5873  bool isCustomPool,
    5874  bool explicitBlockSize,
    5875  uint32_t algorithm);
    5876  ~VmaBlockVector();
    5877 
    5878  VkResult CreateMinBlocks();
    5879 
    5880  VmaPool GetParentPool() const { return m_hParentPool; }
    5881  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
    5882  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
    5883  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
    5884  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
    5885  uint32_t GetAlgorithm() const { return m_Algorithm; }
    5886 
    5887  void GetPoolStats(VmaPoolStats* pStats);
    5888 
    5889  bool IsEmpty() const { return m_Blocks.empty(); }
    5890  bool IsCorruptionDetectionEnabled() const;
    5891 
    5892  VkResult Allocate(
    5893  uint32_t currentFrameIndex,
    5894  VkDeviceSize size,
    5895  VkDeviceSize alignment,
    5896  const VmaAllocationCreateInfo& createInfo,
    5897  VmaSuballocationType suballocType,
    5898  size_t allocationCount,
    5899  VmaAllocation* pAllocations);
    5900 
    5901  void Free(
    5902  VmaAllocation hAllocation);
    5903 
    5904  // Adds statistics of this BlockVector to pStats.
    5905  void AddStats(VmaStats* pStats);
    5906 
    5907 #if VMA_STATS_STRING_ENABLED
    5908  void PrintDetailedMap(class VmaJsonWriter& json);
    5909 #endif
    5910 
    5911  void MakePoolAllocationsLost(
    5912  uint32_t currentFrameIndex,
    5913  size_t* pLostAllocationCount);
    5914  VkResult CheckCorruption();
    5915 
    5916  // Saves results in pCtx->res.
    5917  void Defragment(
    5918  class VmaBlockVectorDefragmentationContext* pCtx,
    5919  VmaDefragmentationStats* pStats,
    5920  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    5921  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    5922  VkCommandBuffer commandBuffer);
    5923  void DefragmentationEnd(
    5924  class VmaBlockVectorDefragmentationContext* pCtx,
    5925  VmaDefragmentationStats* pStats);
    5926 
    5927  ////////////////////////////////////////////////////////////////////////////////
    5928  // To be used only while the m_Mutex is locked. Used during defragmentation.
    5929 
    5930  size_t GetBlockCount() const { return m_Blocks.size(); }
    5931  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
    5932  size_t CalcAllocationCount() const;
    5933  bool IsBufferImageGranularityConflictPossible() const;
    5934 
    5935 private:
    5936  friend class VmaDefragmentationAlgorithm_Generic;
    5937 
    5938  const VmaAllocator m_hAllocator;
    5939  const VmaPool m_hParentPool;
    5940  const uint32_t m_MemoryTypeIndex;
    5941  const VkDeviceSize m_PreferredBlockSize;
    5942  const size_t m_MinBlockCount;
    5943  const size_t m_MaxBlockCount;
    5944  const VkDeviceSize m_BufferImageGranularity;
    5945  const uint32_t m_FrameInUseCount;
    5946  const bool m_IsCustomPool;
    5947  const bool m_ExplicitBlockSize;
    5948  const uint32_t m_Algorithm;
    5949  /* There can be at most one block that is completely empty - a
    5950  hysteresis to avoid the pessimistic case of alternating creation and
    5951  destruction of a VkDeviceMemory. */
    5952  bool m_HasEmptyBlock;
    5953  VMA_RW_MUTEX m_Mutex;
    5954  // Incrementally sorted by sumFreeSize, ascending.
    5955  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
    5956  uint32_t m_NextBlockId;
    5957 
    5958  VkDeviceSize CalcMaxBlockSize() const;
    5959 
    5960  // Finds and removes given block from vector.
    5961  void Remove(VmaDeviceMemoryBlock* pBlock);
    5962 
    5963  // Performs single step in sorting m_Blocks. They may not be fully sorted
    5964  // after this call.
    5965  void IncrementallySortBlocks();
    5966 
    5967  VkResult AllocatePage(
    5968  uint32_t currentFrameIndex,
    5969  VkDeviceSize size,
    5970  VkDeviceSize alignment,
    5971  const VmaAllocationCreateInfo& createInfo,
    5972  VmaSuballocationType suballocType,
    5973  VmaAllocation* pAllocation);
    5974 
    5975  // To be used only without CAN_MAKE_OTHER_LOST flag.
    5976  VkResult AllocateFromBlock(
    5977  VmaDeviceMemoryBlock* pBlock,
    5978  uint32_t currentFrameIndex,
    5979  VkDeviceSize size,
    5980  VkDeviceSize alignment,
    5981  VmaAllocationCreateFlags allocFlags,
    5982  void* pUserData,
    5983  VmaSuballocationType suballocType,
    5984  uint32_t strategy,
    5985  VmaAllocation* pAllocation);
    5986 
    5987  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
    5988 
    5989  // Saves result to pCtx->res.
    5990  void ApplyDefragmentationMovesCpu(
    5991  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    5992  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
    5993  // Saves result to pCtx->res.
    5994  void ApplyDefragmentationMovesGpu(
    5995  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    5996  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    5997  VkCommandBuffer commandBuffer);
    5998 
    5999  /*
    6000  Used during defragmentation. pDefragmentationStats is optional. It's in/out
    6001  - updated with new data.
    6002  */
    6003  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
    6004 };
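/*
Hedged usage sketch: allocating a single page from a block vector. Allocate()
tries the existing blocks first and creates a new VkDeviceMemory block (up to
m_MaxBlockCount) only when nothing fits. VMA_SUBALLOCATION_TYPE_BUFFER is an
assumed example value.
*/
static VkResult VmaBlockVectorAllocateSketch(
    VmaBlockVector& blockVector,
    uint32_t currentFrameIndex,
    VkDeviceSize size,
    VkDeviceSize alignment,
    const VmaAllocationCreateInfo& createInfo,
    VmaAllocation* pAllocation)
{
    return blockVector.Allocate(
        currentFrameIndex,
        size,
        alignment,
        createInfo,
        VMA_SUBALLOCATION_TYPE_BUFFER,
        1, // allocationCount
        pAllocation);
}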
    6005 
    6006 struct VmaPool_T
    6007 {
    6008  VMA_CLASS_NO_COPY(VmaPool_T)
    6009 public:
    6010  VmaBlockVector m_BlockVector;
    6011 
    6012  VmaPool_T(
    6013  VmaAllocator hAllocator,
    6014  const VmaPoolCreateInfo& createInfo,
    6015  VkDeviceSize preferredBlockSize);
    6016  ~VmaPool_T();
    6017 
    6018  uint32_t GetId() const { return m_Id; }
    6019  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
    6020 
    6021 #if VMA_STATS_STRING_ENABLED
    6022  //void PrintDetailedMap(class VmaStringBuilder& sb);
    6023 #endif
    6024 
    6025 private:
    6026  uint32_t m_Id;
    6027 };
    6028 
    6029 /*
    6030 Performs defragmentation:
    6031 
    6032 - Updates `pBlockVector->m_pMetadata`.
    6033 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
    6034 - Does not move actual data, only returns requested moves as `moves`.
    6035 */
    6036 class VmaDefragmentationAlgorithm
    6037 {
    6038  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
    6039 public:
    6040  VmaDefragmentationAlgorithm(
    6041  VmaAllocator hAllocator,
    6042  VmaBlockVector* pBlockVector,
    6043  uint32_t currentFrameIndex) :
    6044  m_hAllocator(hAllocator),
    6045  m_pBlockVector(pBlockVector),
    6046  m_CurrentFrameIndex(currentFrameIndex)
    6047  {
    6048  }
    6049  virtual ~VmaDefragmentationAlgorithm()
    6050  {
    6051  }
    6052 
    6053  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
    6054  virtual void AddAll() = 0;
    6055 
    6056  virtual VkResult Defragment(
    6057  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6058  VkDeviceSize maxBytesToMove,
    6059  uint32_t maxAllocationsToMove) = 0;
    6060 
    6061  virtual VkDeviceSize GetBytesMoved() const = 0;
    6062  virtual uint32_t GetAllocationsMoved() const = 0;
    6063 
    6064 protected:
    6065  VmaAllocator const m_hAllocator;
    6066  VmaBlockVector* const m_pBlockVector;
    6067  const uint32_t m_CurrentFrameIndex;
    6068 
    6069  struct AllocationInfo
    6070  {
    6071  VmaAllocation m_hAllocation;
    6072  VkBool32* m_pChanged;
    6073 
    6074  AllocationInfo() :
    6075  m_hAllocation(VK_NULL_HANDLE),
    6076  m_pChanged(VMA_NULL)
    6077  {
    6078  }
    6079  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
    6080  m_hAllocation(hAlloc),
    6081  m_pChanged(pChanged)
    6082  {
    6083  }
    6084  };
    6085 };
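/*
Hedged sketch of how an owner drives an algorithm instance (mirroring the flow
in VmaBlockVectorDefragmentationContext below): register allocations, then run
Defragment(), which only computes and returns the requested moves - no data is
copied at this stage.
*/
static VkResult VmaRunDefragmentationAlgorithmSketch(
    VmaDefragmentationAlgorithm* pAlgo,
    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    VkDeviceSize maxBytesToMove,
    uint32_t maxAllocationsToMove)
{
    pAlgo->AddAll(); // or AddAllocation() per allocation to restrict the set
    return pAlgo->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
}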
    6086 
    6087 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
    6088 {
    6089  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
    6090 public:
    6091  VmaDefragmentationAlgorithm_Generic(
    6092  VmaAllocator hAllocator,
    6093  VmaBlockVector* pBlockVector,
    6094  uint32_t currentFrameIndex,
    6095  bool overlappingMoveSupported);
    6096  virtual ~VmaDefragmentationAlgorithm_Generic();
    6097 
    6098  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6099  virtual void AddAll() { m_AllAllocations = true; }
    6100 
    6101  virtual VkResult Defragment(
    6102  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6103  VkDeviceSize maxBytesToMove,
    6104  uint32_t maxAllocationsToMove);
    6105 
    6106  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6107  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6108 
    6109 private:
    6110  uint32_t m_AllocationCount;
    6111  bool m_AllAllocations;
    6112 
    6113  VkDeviceSize m_BytesMoved;
    6114  uint32_t m_AllocationsMoved;
    6115 
    6116  struct AllocationInfoSizeGreater
    6117  {
    6118  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6119  {
    6120  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
    6121  }
    6122  };
    6123 
    6124  struct AllocationInfoOffsetGreater
    6125  {
    6126  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
    6127  {
    6128  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
    6129  }
    6130  };
    6131 
    6132  struct BlockInfo
    6133  {
    6134  size_t m_OriginalBlockIndex;
    6135  VmaDeviceMemoryBlock* m_pBlock;
    6136  bool m_HasNonMovableAllocations;
    6137  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
    6138 
    6139  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
    6140  m_OriginalBlockIndex(SIZE_MAX),
    6141  m_pBlock(VMA_NULL),
    6142  m_HasNonMovableAllocations(true),
    6143  m_Allocations(pAllocationCallbacks)
    6144  {
    6145  }
    6146 
    6147  void CalcHasNonMovableAllocations()
    6148  {
    6149  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
    6150  const size_t defragmentAllocCount = m_Allocations.size();
    6151  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
    6152  }
    6153 
    6154  void SortAllocationsBySizeDescending()
    6155  {
    6156  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
    6157  }
    6158 
    6159  void SortAllocationsByOffsetDescending()
    6160  {
    6161  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
    6162  }
    6163  };
    6164 
    6165  struct BlockPointerLess
    6166  {
    6167  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
    6168  {
    6169  return pLhsBlockInfo->m_pBlock < pRhsBlock;
    6170  }
    6171  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6172  {
    6173  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
    6174  }
    6175  };
    6176 
    6177  // 1. Blocks with some non-movable allocations go first.
    6178  // 2. Blocks with smaller sumFreeSize go first.
    6179  struct BlockInfoCompareMoveDestination
    6180  {
    6181  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
    6182  {
    6183  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
    6184  {
    6185  return true;
    6186  }
    6187  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
    6188  {
    6189  return false;
    6190  }
    6191  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
    6192  {
    6193  return true;
    6194  }
    6195  return false;
    6196  }
    6197  };
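 // Worked example (sketch): given block infos A(non-movable, sumFreeSize 10),
 // B(movable only, sumFreeSize 5) and C(non-movable, sumFreeSize 3), sorting
 // with this comparator yields C, A, B: non-movable blocks first, and among
 // equals the smaller sumFreeSize first, so the fullest blocks fill up first.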
    6198 
    6199  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
    6200  BlockInfoVector m_Blocks;
    6201 
    6202  VkResult DefragmentRound(
    6203  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6204  VkDeviceSize maxBytesToMove,
    6205  uint32_t maxAllocationsToMove);
    6206 
    6207  size_t CalcBlocksWithNonMovableCount() const;
    6208 
    6209  static bool MoveMakesSense(
    6210  size_t dstBlockIndex, VkDeviceSize dstOffset,
    6211  size_t srcBlockIndex, VkDeviceSize srcOffset);
    6212 };
    6213 
    6214 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
    6215 {
    6216  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
    6217 public:
    6218  VmaDefragmentationAlgorithm_Fast(
    6219  VmaAllocator hAllocator,
    6220  VmaBlockVector* pBlockVector,
    6221  uint32_t currentFrameIndex,
    6222  bool overlappingMoveSupported);
    6223  virtual ~VmaDefragmentationAlgorithm_Fast();
    6224 
    6225  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
    6226  virtual void AddAll() { m_AllAllocations = true; }
    6227 
    6228  virtual VkResult Defragment(
    6229  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    6230  VkDeviceSize maxBytesToMove,
    6231  uint32_t maxAllocationsToMove);
    6232 
    6233  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
    6234  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
    6235 
    6236 private:
    6237  struct BlockInfo
    6238  {
    6239  size_t origBlockIndex;
    6240  };
    6241 
    6242  class FreeSpaceDatabase
    6243  {
    6244  public:
    6245  FreeSpaceDatabase()
    6246  {
    6247  FreeSpace s = {};
    6248  s.blockInfoIndex = SIZE_MAX;
    6249  for(size_t i = 0; i < MAX_COUNT; ++i)
    6250  {
    6251  m_FreeSpaces[i] = s;
    6252  }
    6253  }
    6254 
    6255  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
    6256  {
    6257  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6258  {
    6259  return;
    6260  }
    6261 
    6262  // Find the first invalid (empty) entry, or else the smallest stored structure to replace.
    6263  size_t bestIndex = SIZE_MAX;
    6264  for(size_t i = 0; i < MAX_COUNT; ++i)
    6265  {
    6266  // Empty structure.
    6267  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
    6268  {
    6269  bestIndex = i;
    6270  break;
    6271  }
    6272  if(m_FreeSpaces[i].size < size &&
    6273  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
    6274  {
    6275  bestIndex = i;
    6276  }
    6277  }
    6278 
    6279  if(bestIndex != SIZE_MAX)
    6280  {
    6281  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
    6282  m_FreeSpaces[bestIndex].offset = offset;
    6283  m_FreeSpaces[bestIndex].size = size;
    6284  }
    6285  }
    6286 
    6287  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
    6288  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
    6289  {
    6290  size_t bestIndex = SIZE_MAX;
    6291  VkDeviceSize bestFreeSpaceAfter = 0;
    6292  for(size_t i = 0; i < MAX_COUNT; ++i)
    6293  {
    6294  // Structure is valid.
    6295  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
    6296  {
    6297  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
    6298  // Allocation fits into this structure.
    6299  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
    6300  {
    6301  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
    6302  (dstOffset + size);
    6303  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
    6304  {
    6305  bestIndex = i;
    6306  bestFreeSpaceAfter = freeSpaceAfter;
    6307  }
    6308  }
    6309  }
    6310  }
    6311 
    6312  if(bestIndex != SIZE_MAX)
    6313  {
    6314  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
    6315  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
    6316 
    6317  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    6318  {
    6319  // Leave this structure for remaining empty space.
    6320  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
    6321  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
    6322  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
    6323  }
    6324  else
    6325  {
    6326  // This structure becomes invalid.
    6327  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
    6328  }
    6329 
    6330  return true;
    6331  }
    6332 
    6333  return false;
    6334  }
    6335 
    6336  private:
    6337  static const size_t MAX_COUNT = 4;
    6338 
    6339  struct FreeSpace
    6340  {
    6341  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
    6342  VkDeviceSize offset;
    6343  VkDeviceSize size;
    6344  } m_FreeSpaces[MAX_COUNT];
    6345  };
    6346 
    6347  const bool m_OverlappingMoveSupported;
    6348 
    6349  uint32_t m_AllocationCount;
    6350  bool m_AllAllocations;
    6351 
    6352  VkDeviceSize m_BytesMoved;
    6353  uint32_t m_AllocationsMoved;
    6354 
    6355  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
    6356 
    6357  void PreprocessMetadata();
    6358  void PostprocessMetadata();
    6359  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
    6360 };
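/*
Worked example (sketch) of the FreeSpaceDatabase contract above: Register()
remembers up to MAX_COUNT free ranges, replacing the smallest stored one when
full; Fetch() picks the range that would leave the most space after the
allocation. E.g. after Register(blockInfoIndex 0, offset 0, size 256),
Fetch(alignment 16, size 100) returns block 0 / offset 0, and the stored entry
shrinks to offset 100 / size 156, because 156 is still large enough to keep.
*/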
    6361 
    6362 struct VmaBlockDefragmentationContext
    6363 {
    6364  enum BLOCK_FLAG
    6365  {
    6366  BLOCK_FLAG_USED = 0x00000001,
    6367  };
    6368  uint32_t flags;
    6369  VkBuffer hBuffer;
    6370 
    6371  VmaBlockDefragmentationContext() :
    6372  flags(0),
    6373  hBuffer(VK_NULL_HANDLE)
    6374  {
    6375  }
    6376 };
    6377 
    6378 class VmaBlockVectorDefragmentationContext
    6379 {
    6380  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
    6381 public:
    6382  VkResult res;
    6383  bool mutexLocked;
    6384  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
    6385 
    6386  VmaBlockVectorDefragmentationContext(
    6387  VmaAllocator hAllocator,
    6388  VmaPool hCustomPool, // Optional.
    6389  VmaBlockVector* pBlockVector,
    6390  uint32_t currFrameIndex,
    6391  uint32_t flags);
    6392  ~VmaBlockVectorDefragmentationContext();
    6393 
    6394  VmaPool GetCustomPool() const { return m_hCustomPool; }
    6395  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
    6396  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
    6397 
    6398  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
    6399  void AddAll() { m_AllAllocations = true; }
    6400 
    6401  void Begin(bool overlappingMoveSupported);
    6402 
    6403 private:
    6404  const VmaAllocator m_hAllocator;
    6405  // Null if not from custom pool.
    6406  const VmaPool m_hCustomPool;
    6407  // Redundant, stored for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
    6408  VmaBlockVector* const m_pBlockVector;
    6409  const uint32_t m_CurrFrameIndex;
    6410  const uint32_t m_AlgorithmFlags;
    6411  // Owner of this object.
    6412  VmaDefragmentationAlgorithm* m_pAlgorithm;
    6413 
    6414  struct AllocInfo
    6415  {
    6416  VmaAllocation hAlloc;
    6417  VkBool32* pChanged;
    6418  };
    6419  // Used between constructor and Begin.
    6420  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
    6421  bool m_AllAllocations;
    6422 };
    6423 
    6424 struct VmaDefragmentationContext_T
    6425 {
    6426 private:
    6427  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
    6428 public:
    6429  VmaDefragmentationContext_T(
    6430  VmaAllocator hAllocator,
    6431  uint32_t currFrameIndex,
    6432  uint32_t flags,
    6433  VmaDefragmentationStats* pStats);
    6434  ~VmaDefragmentationContext_T();
    6435 
    6436  void AddPools(uint32_t poolCount, VmaPool* pPools);
    6437  void AddAllocations(
    6438  uint32_t allocationCount,
    6439  VmaAllocation* pAllocations,
    6440  VkBool32* pAllocationsChanged);
    6441 
    6442  /*
    6443  Returns:
    6444  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
    6445  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
    6446  - Negative value if an error occurred and the object can be destroyed immediately.
    6447  */
    6448  VkResult Defragment(
    6449  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    6450  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    6451  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats);
    6452 
    6453 private:
    6454  const VmaAllocator m_hAllocator;
    6455  const uint32_t m_CurrFrameIndex;
    6456  const uint32_t m_Flags;
    6457  VmaDefragmentationStats* const m_pStats;
    6458  // Owner of these objects.
    6459  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
    6460  // Owner of these objects.
    6461  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
    6462 };
    6463 
    6464 #if VMA_RECORDING_ENABLED
    6465 
    6466 class VmaRecorder
    6467 {
    6468 public:
    6469  VmaRecorder();
    6470  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
    6471  void WriteConfiguration(
    6472  const VkPhysicalDeviceProperties& devProps,
    6473  const VkPhysicalDeviceMemoryProperties& memProps,
    6474  bool dedicatedAllocationExtensionEnabled);
    6475  ~VmaRecorder();
    6476 
    6477  void RecordCreateAllocator(uint32_t frameIndex);
    6478  void RecordDestroyAllocator(uint32_t frameIndex);
    6479  void RecordCreatePool(uint32_t frameIndex,
    6480  const VmaPoolCreateInfo& createInfo,
    6481  VmaPool pool);
    6482  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
    6483  void RecordAllocateMemory(uint32_t frameIndex,
    6484  const VkMemoryRequirements& vkMemReq,
    6485  const VmaAllocationCreateInfo& createInfo,
    6486  VmaAllocation allocation);
    6487  void RecordAllocateMemoryPages(uint32_t frameIndex,
    6488  const VkMemoryRequirements& vkMemReq,
    6489  const VmaAllocationCreateInfo& createInfo,
    6490  uint64_t allocationCount,
    6491  const VmaAllocation* pAllocations);
    6492  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    6493  const VkMemoryRequirements& vkMemReq,
    6494  bool requiresDedicatedAllocation,
    6495  bool prefersDedicatedAllocation,
    6496  const VmaAllocationCreateInfo& createInfo,
    6497  VmaAllocation allocation);
    6498  void RecordAllocateMemoryForImage(uint32_t frameIndex,
    6499  const VkMemoryRequirements& vkMemReq,
    6500  bool requiresDedicatedAllocation,
    6501  bool prefersDedicatedAllocation,
    6502  const VmaAllocationCreateInfo& createInfo,
    6503  VmaAllocation allocation);
    6504  void RecordFreeMemory(uint32_t frameIndex,
    6505  VmaAllocation allocation);
    6506  void RecordFreeMemoryPages(uint32_t frameIndex,
    6507  uint64_t allocationCount,
    6508  const VmaAllocation* pAllocations);
    6509  void RecordResizeAllocation(
    6510  uint32_t frameIndex,
    6511  VmaAllocation allocation,
    6512  VkDeviceSize newSize);
    6513  void RecordSetAllocationUserData(uint32_t frameIndex,
    6514  VmaAllocation allocation,
    6515  const void* pUserData);
    6516  void RecordCreateLostAllocation(uint32_t frameIndex,
    6517  VmaAllocation allocation);
    6518  void RecordMapMemory(uint32_t frameIndex,
    6519  VmaAllocation allocation);
    6520  void RecordUnmapMemory(uint32_t frameIndex,
    6521  VmaAllocation allocation);
    6522  void RecordFlushAllocation(uint32_t frameIndex,
    6523  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6524  void RecordInvalidateAllocation(uint32_t frameIndex,
    6525  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
    6526  void RecordCreateBuffer(uint32_t frameIndex,
    6527  const VkBufferCreateInfo& bufCreateInfo,
    6528  const VmaAllocationCreateInfo& allocCreateInfo,
    6529  VmaAllocation allocation);
    6530  void RecordCreateImage(uint32_t frameIndex,
    6531  const VkImageCreateInfo& imageCreateInfo,
    6532  const VmaAllocationCreateInfo& allocCreateInfo,
    6533  VmaAllocation allocation);
    6534  void RecordDestroyBuffer(uint32_t frameIndex,
    6535  VmaAllocation allocation);
    6536  void RecordDestroyImage(uint32_t frameIndex,
    6537  VmaAllocation allocation);
    6538  void RecordTouchAllocation(uint32_t frameIndex,
    6539  VmaAllocation allocation);
    6540  void RecordGetAllocationInfo(uint32_t frameIndex,
    6541  VmaAllocation allocation);
    6542  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
    6543  VmaPool pool);
    6544  void RecordDefragmentationBegin(uint32_t frameIndex,
    6545  const VmaDefragmentationInfo2& info,
    6546  VmaDefragmentationContext ctx);
    6547  void RecordDefragmentationEnd(uint32_t frameIndex,
    6548  VmaDefragmentationContext ctx);
    6549 
    6550 private:
    6551  struct CallParams
    6552  {
    6553  uint32_t threadId;
    6554  double time;
    6555  };
    6556 
    6557  class UserDataString
    6558  {
    6559  public:
    6560  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
    6561  const char* GetString() const { return m_Str; }
    6562 
    6563  private:
    6564  char m_PtrStr[17];
    6565  const char* m_Str;
    6566  };
    6567 
    6568  bool m_UseMutex;
    6569  VmaRecordFlags m_Flags;
    6570  FILE* m_File;
    6571  VMA_MUTEX m_FileMutex;
    6572  int64_t m_Freq;
    6573  int64_t m_StartCounter;
    6574 
    6575  void GetBasicParams(CallParams& outParams);
    6576 
    6577  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
    6578  template<typename T>
    6579  void PrintPointerList(uint64_t count, const T* pItems)
    6580  {
    6581  if(count)
    6582  {
    6583  fprintf(m_File, "%p", pItems[0]);
    6584  for(uint64_t i = 1; i < count; ++i)
    6585  {
    6586  fprintf(m_File, " %p", pItems[i]);
    6587  }
    6588  }
    6589  }
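 // Example output (sketch): for count == 3 this prints three %p-formatted
 // pointers separated by single spaces, e.g. "0x1a0 0x2b0 0x3c0" (the exact
 // rendering of %p is platform-dependent).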
    6590 
    6591  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
    6592  void Flush();
    6593 };
    6594 
    6595 #endif // #if VMA_RECORDING_ENABLED
    6596 
    6597 /*
    6598 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
    6599 */
    6600 class VmaAllocationObjectAllocator
    6601 {
    6602  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
    6603 public:
    6604  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
    6605 
    6606  VmaAllocation Allocate();
    6607  void Free(VmaAllocation hAlloc);
    6608 
    6609 private:
    6610  VMA_MUTEX m_Mutex;
    6611  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
    6612 };
    6613 
    6614 // Main allocator object.
    6615 struct VmaAllocator_T
    6616 {
    6617  VMA_CLASS_NO_COPY(VmaAllocator_T)
    6618 public:
    6619  bool m_UseMutex;
    6620  bool m_UseKhrDedicatedAllocation;
    6621  VkDevice m_hDevice;
    6622  bool m_AllocationCallbacksSpecified;
    6623  VkAllocationCallbacks m_AllocationCallbacks;
    6624  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
    6625  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
    6626 
    6627  // Number of bytes still free out of the heap size limit, or VK_WHOLE_SIZE if that heap has no limit.
    6628  VkDeviceSize m_HeapSizeLimit[VK_MAX_MEMORY_HEAPS];
    6629  VMA_MUTEX m_HeapSizeLimitMutex;
    6630 
    6631  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
    6632  VkPhysicalDeviceMemoryProperties m_MemProps;
    6633 
    6634  // Default pools.
    6635  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
    6636 
    6637  // Each vector is sorted by memory (handle value).
    6638  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
    6639  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
    6640  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
    6641 
    6642  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
    6643  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
    6644  ~VmaAllocator_T();
    6645 
    6646  const VkAllocationCallbacks* GetAllocationCallbacks() const
    6647  {
    6648  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
    6649  }
    6650  const VmaVulkanFunctions& GetVulkanFunctions() const
    6651  {
    6652  return m_VulkanFunctions;
    6653  }
    6654 
    6655  VkDeviceSize GetBufferImageGranularity() const
    6656  {
    6657  return VMA_MAX(
    6658  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
    6659  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
    6660  }
    6661 
    6662  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
    6663  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
    6664 
    6665  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
    6666  {
    6667  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
    6668  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
    6669  }
    6670  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
    6671  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
    6672  {
    6673  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
    6674  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    6675  }
    6676  // Minimum alignment for all allocations in specific memory type.
    6677  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
    6678  {
    6679  return IsMemoryTypeNonCoherent(memTypeIndex) ?
    6680  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
    6681  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
    6682  }
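 // Worked example (sketch): with nonCoherentAtomSize == 64 and
 // VMA_DEBUG_ALIGNMENT == 1, a HOST_VISIBLE but non-HOST_COHERENT memory type
 // gets minimum alignment max(1, 64) == 64, so flush/invalidate ranges rounded
 // to the atom size can never spill into a neighboring allocation.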
    6683 
    6684  bool IsIntegratedGpu() const
    6685  {
    6686  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
    6687  }
    6688 
    6689 #if VMA_RECORDING_ENABLED
    6690  VmaRecorder* GetRecorder() const { return m_pRecorder; }
    6691 #endif
    6692 
    6693  void GetBufferMemoryRequirements(
    6694  VkBuffer hBuffer,
    6695  VkMemoryRequirements& memReq,
    6696  bool& requiresDedicatedAllocation,
    6697  bool& prefersDedicatedAllocation) const;
    6698  void GetImageMemoryRequirements(
    6699  VkImage hImage,
    6700  VkMemoryRequirements& memReq,
    6701  bool& requiresDedicatedAllocation,
    6702  bool& prefersDedicatedAllocation) const;
    6703 
    6704  // Main allocation function.
    6705  VkResult AllocateMemory(
    6706  const VkMemoryRequirements& vkMemReq,
    6707  bool requiresDedicatedAllocation,
    6708  bool prefersDedicatedAllocation,
    6709  VkBuffer dedicatedBuffer,
    6710  VkImage dedicatedImage,
    6711  const VmaAllocationCreateInfo& createInfo,
    6712  VmaSuballocationType suballocType,
    6713  size_t allocationCount,
    6714  VmaAllocation* pAllocations);
    6715 
    6716  // Main deallocation function.
    6717  void FreeMemory(
    6718  size_t allocationCount,
    6719  const VmaAllocation* pAllocations);
    6720 
    6721  VkResult ResizeAllocation(
    6722  const VmaAllocation alloc,
    6723  VkDeviceSize newSize);
    6724 
    6725  void CalculateStats(VmaStats* pStats);
    6726 
    6727 #if VMA_STATS_STRING_ENABLED
    6728  void PrintDetailedMap(class VmaJsonWriter& json);
    6729 #endif
    6730 
    6731  VkResult DefragmentationBegin(
    6732  const VmaDefragmentationInfo2& info,
    6733  VmaDefragmentationStats* pStats,
    6734  VmaDefragmentationContext* pContext);
    6735  VkResult DefragmentationEnd(
    6736  VmaDefragmentationContext context);
    6737 
    6738  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
    6739  bool TouchAllocation(VmaAllocation hAllocation);
    6740 
    6741  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
    6742  void DestroyPool(VmaPool pool);
    6743  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
    6744 
    6745  void SetCurrentFrameIndex(uint32_t frameIndex);
    6746  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
    6747 
    6748  void MakePoolAllocationsLost(
    6749  VmaPool hPool,
    6750  size_t* pLostAllocationCount);
    6751  VkResult CheckPoolCorruption(VmaPool hPool);
    6752  VkResult CheckCorruption(uint32_t memoryTypeBits);
    6753 
    6754  void CreateLostAllocation(VmaAllocation* pAllocation);
    6755 
    6756  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
    6757  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
    6758 
    6759  VkResult Map(VmaAllocation hAllocation, void** ppData);
    6760  void Unmap(VmaAllocation hAllocation);
    6761 
    6762  VkResult BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer);
    6763  VkResult BindImageMemory(VmaAllocation hAllocation, VkImage hImage);
    6764 
    6765  void FlushOrInvalidateAllocation(
    6766  VmaAllocation hAllocation,
    6767  VkDeviceSize offset, VkDeviceSize size,
    6768  VMA_CACHE_OPERATION op);
    6769 
    6770  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
    6771 
    6772 private:
    6773  VkDeviceSize m_PreferredLargeHeapBlockSize;
    6774 
    6775  VkPhysicalDevice m_PhysicalDevice;
    6776  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
    6777 
    6778  VMA_RW_MUTEX m_PoolsMutex;
    6779  // Protected by m_PoolsMutex. Sorted by pointer value.
    6780  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
    6781  uint32_t m_NextPoolId;
    6782 
    6783  VmaVulkanFunctions m_VulkanFunctions;
    6784 
    6785 #if VMA_RECORDING_ENABLED
    6786  VmaRecorder* m_pRecorder;
    6787 #endif
    6788 
    6789  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
    6790 
    6791  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
    6792 
    6793  VkResult AllocateMemoryOfType(
    6794  VkDeviceSize size,
    6795  VkDeviceSize alignment,
    6796  bool dedicatedAllocation,
    6797  VkBuffer dedicatedBuffer,
    6798  VkImage dedicatedImage,
    6799  const VmaAllocationCreateInfo& createInfo,
    6800  uint32_t memTypeIndex,
    6801  VmaSuballocationType suballocType,
    6802  size_t allocationCount,
    6803  VmaAllocation* pAllocations);
    6804 
    6805  // Helper function only to be used inside AllocateDedicatedMemory.
    6806  VkResult AllocateDedicatedMemoryPage(
    6807  VkDeviceSize size,
    6808  VmaSuballocationType suballocType,
    6809  uint32_t memTypeIndex,
    6810  const VkMemoryAllocateInfo& allocInfo,
    6811  bool map,
    6812  bool isUserDataString,
    6813  void* pUserData,
    6814  VmaAllocation* pAllocation);
    6815 
    6816  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
    6817  VkResult AllocateDedicatedMemory(
    6818  VkDeviceSize size,
    6819  VmaSuballocationType suballocType,
    6820  uint32_t memTypeIndex,
    6821  bool map,
    6822  bool isUserDataString,
    6823  void* pUserData,
    6824  VkBuffer dedicatedBuffer,
    6825  VkImage dedicatedImage,
    6826  size_t allocationCount,
    6827  VmaAllocation* pAllocations);
    6828 
    6829  // Frees the given dedicated allocation: unregisters it and frees its VkDeviceMemory.
    6830  void FreeDedicatedMemory(VmaAllocation allocation);
    6831 };
    6832 
    6833 ////////////////////////////////////////////////////////////////////////////////
    6834 // Memory allocation #2 after VmaAllocator_T definition
    6835 
    6836 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
    6837 {
    6838  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
    6839 }
    6840 
    6841 static void VmaFree(VmaAllocator hAllocator, void* ptr)
    6842 {
    6843  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
    6844 }
    6845 
    6846 template<typename T>
    6847 static T* VmaAllocate(VmaAllocator hAllocator)
    6848 {
    6849  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
    6850 }
    6851 
    6852 template<typename T>
    6853 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
    6854 {
    6855  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
    6856 }
    6857 
    6858 template<typename T>
    6859 static void vma_delete(VmaAllocator hAllocator, T* ptr)
    6860 {
    6861  if(ptr != VMA_NULL)
    6862  {
    6863  ptr->~T();
    6864  VmaFree(hAllocator, ptr);
    6865  }
    6866 }
    6867 
    6868 template<typename T>
    6869 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
    6870 {
    6871  if(ptr != VMA_NULL)
    6872  {
    6873  for(size_t i = count; i--; )
    6874  ptr[i].~T();
    6875  VmaFree(hAllocator, ptr);
    6876  }
    6877 }
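/*
Hedged usage sketch for the helpers above: allocate raw storage through the
allocator's callbacks, construct in place, then destroy via vma_delete(),
which runs the destructor and frees the storage. Assumes <new> is available
for placement-new.
*/
template<typename T>
static T* vma_new_object_sketch(VmaAllocator hAllocator)
{
    return new(VmaAllocate<T>(hAllocator)) T();
}
// Usage: T* p = vma_new_object_sketch<T>(hAllocator); ... vma_delete(hAllocator, p);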
    6878 
    6879 ////////////////////////////////////////////////////////////////////////////////
    6880 // VmaStringBuilder
    6881 
    6882 #if VMA_STATS_STRING_ENABLED
    6883 
    6884 class VmaStringBuilder
    6885 {
    6886 public:
    6887  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
    6888  size_t GetLength() const { return m_Data.size(); }
    6889  const char* GetData() const { return m_Data.data(); }
    6890 
    6891  void Add(char ch) { m_Data.push_back(ch); }
    6892  void Add(const char* pStr);
    6893  void AddNewLine() { Add('\n'); }
    6894  void AddNumber(uint32_t num);
    6895  void AddNumber(uint64_t num);
    6896  void AddPointer(const void* ptr);
    6897 
    6898 private:
    6899  VmaVector< char, VmaStlAllocator<char> > m_Data;
    6900 };
    6901 
    6902 void VmaStringBuilder::Add(const char* pStr)
    6903 {
    6904  const size_t strLen = strlen(pStr);
    6905  if(strLen > 0)
    6906  {
    6907  const size_t oldCount = m_Data.size();
    6908  m_Data.resize(oldCount + strLen);
    6909  memcpy(m_Data.data() + oldCount, pStr, strLen);
    6910  }
    6911 }
    6912 
    6913 void VmaStringBuilder::AddNumber(uint32_t num)
    6914 {
    6915  char buf[11];
    6916  VmaUint32ToStr(buf, sizeof(buf), num);
    6917  Add(buf);
    6918 }
    6919 
    6920 void VmaStringBuilder::AddNumber(uint64_t num)
    6921 {
    6922  char buf[21];
    6923  VmaUint64ToStr(buf, sizeof(buf), num);
    6924  Add(buf);
    6925 }
    6926 
    6927 void VmaStringBuilder::AddPointer(const void* ptr)
    6928 {
    6929  char buf[21];
    6930  VmaPtrToStr(buf, sizeof(buf), ptr);
    6931  Add(buf);
    6932 }
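
/*
Hedged usage sketch: building a short string with VmaStringBuilder. Note the
buffer is not null-terminated by these methods (an assumption based on the
vector-backed storage above); use GetLength() together with GetData().
*/
static void VmaStringBuilderUsageSketch(VmaAllocator alloc)
{
    VmaStringBuilder sb(alloc);
    sb.Add("MemoryTypeIndex: ");
    sb.AddNumber(uint32_t(7)); // uint32_t overload
    sb.AddNewLine();
    // sb.GetData() now points at GetLength() characters of text.
}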
    6933 
    6934 #endif // #if VMA_STATS_STRING_ENABLED
    6935 
    6936 ////////////////////////////////////////////////////////////////////////////////
    6937 // VmaJsonWriter
    6938 
    6939 #if VMA_STATS_STRING_ENABLED
    6940 
    6941 class VmaJsonWriter
    6942 {
    6943  VMA_CLASS_NO_COPY(VmaJsonWriter)
    6944 public:
    6945  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
    6946  ~VmaJsonWriter();
    6947 
    6948  void BeginObject(bool singleLine = false);
    6949  void EndObject();
    6950 
    6951  void BeginArray(bool singleLine = false);
    6952  void EndArray();
    6953 
    6954  void WriteString(const char* pStr);
    6955  void BeginString(const char* pStr = VMA_NULL);
    6956  void ContinueString(const char* pStr);
    6957  void ContinueString(uint32_t n);
    6958  void ContinueString(uint64_t n);
    6959  void ContinueString_Pointer(const void* ptr);
    6960  void EndString(const char* pStr = VMA_NULL);
    6961 
    6962  void WriteNumber(uint32_t n);
    6963  void WriteNumber(uint64_t n);
    6964  void WriteBool(bool b);
    6965  void WriteNull();
    6966 
    6967 private:
    6968  static const char* const INDENT;
    6969 
    6970  enum COLLECTION_TYPE
    6971  {
    6972  COLLECTION_TYPE_OBJECT,
    6973  COLLECTION_TYPE_ARRAY,
    6974  };
    6975  struct StackItem
    6976  {
    6977  COLLECTION_TYPE type;
    6978  uint32_t valueCount;
    6979  bool singleLineMode;
    6980  };
    6981 
    6982  VmaStringBuilder& m_SB;
    6983  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
    6984  bool m_InsideString;
    6985 
    6986  void BeginValue(bool isString);
    6987  void WriteIndent(bool oneLess = false);
    6988 };
    6989 
    6990 const char* const VmaJsonWriter::INDENT = "  ";
    6991 
    6992 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
    6993  m_SB(sb),
    6994  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
    6995  m_InsideString(false)
    6996 {
    6997 }
    6998 
    6999 VmaJsonWriter::~VmaJsonWriter()
    7000 {
    7001  VMA_ASSERT(!m_InsideString);
    7002  VMA_ASSERT(m_Stack.empty());
    7003 }
    7004 
    7005 void VmaJsonWriter::BeginObject(bool singleLine)
    7006 {
    7007  VMA_ASSERT(!m_InsideString);
    7008 
    7009  BeginValue(false);
    7010  m_SB.Add('{');
    7011 
    7012  StackItem item;
    7013  item.type = COLLECTION_TYPE_OBJECT;
    7014  item.valueCount = 0;
    7015  item.singleLineMode = singleLine;
    7016  m_Stack.push_back(item);
    7017 }
    7018 
    7019 void VmaJsonWriter::EndObject()
    7020 {
    7021  VMA_ASSERT(!m_InsideString);
    7022 
    7023  WriteIndent(true);
    7024  m_SB.Add('}');
    7025 
    7026  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
    7027  m_Stack.pop_back();
    7028 }
    7029 
    7030 void VmaJsonWriter::BeginArray(bool singleLine)
    7031 {
    7032  VMA_ASSERT(!m_InsideString);
    7033 
    7034  BeginValue(false);
    7035  m_SB.Add('[');
    7036 
    7037  StackItem item;
    7038  item.type = COLLECTION_TYPE_ARRAY;
    7039  item.valueCount = 0;
    7040  item.singleLineMode = singleLine;
    7041  m_Stack.push_back(item);
    7042 }
    7043 
    7044 void VmaJsonWriter::EndArray()
    7045 {
    7046  VMA_ASSERT(!m_InsideString);
    7047 
    7048  WriteIndent(true);
    7049  m_SB.Add(']');
    7050 
    7051  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
    7052  m_Stack.pop_back();
    7053 }
    7054 
    7055 void VmaJsonWriter::WriteString(const char* pStr)
    7056 {
    7057  BeginString(pStr);
    7058  EndString();
    7059 }
    7060 
    7061 void VmaJsonWriter::BeginString(const char* pStr)
    7062 {
    7063  VMA_ASSERT(!m_InsideString);
    7064 
    7065  BeginValue(true);
    7066  m_SB.Add('"');
    7067  m_InsideString = true;
    7068  if(pStr != VMA_NULL && pStr[0] != '\0')
    7069  {
    7070  ContinueString(pStr);
    7071  }
    7072 }
    7073 
    7074 void VmaJsonWriter::ContinueString(const char* pStr)
    7075 {
    7076  VMA_ASSERT(m_InsideString);
    7077 
    7078  const size_t strLen = strlen(pStr);
    7079  for(size_t i = 0; i < strLen; ++i)
    7080  {
    7081  char ch = pStr[i];
    7082  if(ch == '\\')
    7083  {
    7084  m_SB.Add("\\\\");
    7085  }
    7086  else if(ch == '"')
    7087  {
    7088  m_SB.Add("\\\"");
    7089  }
    7090  else if(ch >= 32)
    7091  {
    7092  m_SB.Add(ch);
    7093  }
    7094  else switch(ch)
    7095  {
    7096  case '\b':
    7097  m_SB.Add("\\b");
    7098  break;
    7099  case '\f':
    7100  m_SB.Add("\\f");
    7101  break;
    7102  case '\n':
    7103  m_SB.Add("\\n");
    7104  break;
    7105  case '\r':
    7106  m_SB.Add("\\r");
    7107  break;
    7108  case '\t':
    7109  m_SB.Add("\\t");
    7110  break;
    7111  default:
    7112  VMA_ASSERT(0 && "Character not currently supported.");
    7113  break;
    7114  }
    7115  }
    7116 }
    7117 
    7118 void VmaJsonWriter::ContinueString(uint32_t n)
    7119 {
    7120  VMA_ASSERT(m_InsideString);
    7121  m_SB.AddNumber(n);
    7122 }
    7123 
    7124 void VmaJsonWriter::ContinueString(uint64_t n)
    7125 {
    7126  VMA_ASSERT(m_InsideString);
    7127  m_SB.AddNumber(n);
    7128 }
    7129 
    7130 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
    7131 {
    7132  VMA_ASSERT(m_InsideString);
    7133  m_SB.AddPointer(ptr);
    7134 }
    7135 
    7136 void VmaJsonWriter::EndString(const char* pStr)
    7137 {
    7138  VMA_ASSERT(m_InsideString);
    7139  if(pStr != VMA_NULL && pStr[0] != '\0')
    7140  {
    7141  ContinueString(pStr);
    7142  }
    7143  m_SB.Add('"');
    7144  m_InsideString = false;
    7145 }
    7146 
    7147 void VmaJsonWriter::WriteNumber(uint32_t n)
    7148 {
    7149  VMA_ASSERT(!m_InsideString);
    7150  BeginValue(false);
    7151  m_SB.AddNumber(n);
    7152 }
    7153 
    7154 void VmaJsonWriter::WriteNumber(uint64_t n)
    7155 {
    7156  VMA_ASSERT(!m_InsideString);
    7157  BeginValue(false);
    7158  m_SB.AddNumber(n);
    7159 }
    7160 
    7161 void VmaJsonWriter::WriteBool(bool b)
    7162 {
    7163  VMA_ASSERT(!m_InsideString);
    7164  BeginValue(false);
    7165  m_SB.Add(b ? "true" : "false");
    7166 }
    7167 
    7168 void VmaJsonWriter::WriteNull()
    7169 {
    7170  VMA_ASSERT(!m_InsideString);
    7171  BeginValue(false);
    7172  m_SB.Add("null");
    7173 }
    7174 
    7175 void VmaJsonWriter::BeginValue(bool isString)
    7176 {
    7177  if(!m_Stack.empty())
    7178  {
    7179  StackItem& currItem = m_Stack.back();
    7180  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7181  currItem.valueCount % 2 == 0)
    7182  {
    7183  VMA_ASSERT(isString);
    7184  }
    7185 
    7186  if(currItem.type == COLLECTION_TYPE_OBJECT &&
    7187  currItem.valueCount % 2 != 0)
    7188  {
    7189  m_SB.Add(": ");
    7190  }
    7191  else if(currItem.valueCount > 0)
    7192  {
    7193  m_SB.Add(", ");
    7194  WriteIndent();
    7195  }
    7196  else
    7197  {
    7198  WriteIndent();
    7199  }
    7200  ++currItem.valueCount;
    7201  }
    7202 }
    7203 
    7204 void VmaJsonWriter::WriteIndent(bool oneLess)
    7205 {
    7206  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
    7207  {
    7208  m_SB.AddNewLine();
    7209 
    7210  size_t count = m_Stack.size();
    7211  if(count > 0 && oneLess)
    7212  {
    7213  --count;
    7214  }
    7215  for(size_t i = 0; i < count; ++i)
    7216  {
    7217  m_SB.Add(INDENT);
    7218  }
    7219  }
    7220 }
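
/*
Hedged usage sketch of the writer above:

    VmaStringBuilder sb(hAllocator);
    VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Blocks"); // key - BeginValue() asserts keys are strings
    json.WriteNumber(3u);       // value - preceded by ": " automatically
    json.EndObject();

This leaves {"Blocks": 3} (modulo newlines and INDENT) in the string builder.
*/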
    7221 
    7222 #endif // #if VMA_STATS_STRING_ENABLED
    7223 
    7224 ////////////////////////////////////////////////////////////////////////////////
    7225 
    7226 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
    7227 {
    7228  if(IsUserDataString())
    7229  {
    7230  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
    7231 
    7232  FreeUserDataString(hAllocator);
    7233 
    7234  if(pUserData != VMA_NULL)
    7235  {
    7236  const char* const newStrSrc = (char*)pUserData;
    7237  const size_t newStrLen = strlen(newStrSrc);
    7238  char* const newStrDst = vma_new_array(hAllocator, char, newStrLen + 1);
    7239  memcpy(newStrDst, newStrSrc, newStrLen + 1);
    7240  m_pUserData = newStrDst;
    7241  }
    7242  }
    7243  else
    7244  {
    7245  m_pUserData = pUserData;
    7246  }
    7247 }
    7248 
    7249 void VmaAllocation_T::ChangeBlockAllocation(
    7250  VmaAllocator hAllocator,
    7251  VmaDeviceMemoryBlock* block,
    7252  VkDeviceSize offset)
    7253 {
    7254  VMA_ASSERT(block != VMA_NULL);
    7255  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7256 
    7257  // Move mapping reference counter from old block to new block.
    7258  if(block != m_BlockAllocation.m_Block)
    7259  {
    7260  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
    7261  if(IsPersistentMap())
    7262  ++mapRefCount;
    7263  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
    7264  block->Map(hAllocator, mapRefCount, VMA_NULL);
    7265  }
    7266 
    7267  m_BlockAllocation.m_Block = block;
    7268  m_BlockAllocation.m_Offset = offset;
    7269 }
    7270 
    7271 void VmaAllocation_T::ChangeSize(VkDeviceSize newSize)
    7272 {
    7273  VMA_ASSERT(newSize > 0);
    7274  m_Size = newSize;
    7275 }
    7276 
    7277 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
    7278 {
    7279  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
    7280  m_BlockAllocation.m_Offset = newOffset;
    7281 }
    7282 
    7283 VkDeviceSize VmaAllocation_T::GetOffset() const
    7284 {
    7285  switch(m_Type)
    7286  {
    7287  case ALLOCATION_TYPE_BLOCK:
    7288  return m_BlockAllocation.m_Offset;
    7289  case ALLOCATION_TYPE_DEDICATED:
    7290  return 0;
    7291  default:
    7292  VMA_ASSERT(0);
    7293  return 0;
    7294  }
    7295 }
    7296 
    7297 VkDeviceMemory VmaAllocation_T::GetMemory() const
    7298 {
    7299  switch(m_Type)
    7300  {
    7301  case ALLOCATION_TYPE_BLOCK:
    7302  return m_BlockAllocation.m_Block->GetDeviceMemory();
    7303  case ALLOCATION_TYPE_DEDICATED:
    7304  return m_DedicatedAllocation.m_hMemory;
    7305  default:
    7306  VMA_ASSERT(0);
    7307  return VK_NULL_HANDLE;
    7308  }
    7309 }
    7310 
    7311 uint32_t VmaAllocation_T::GetMemoryTypeIndex() const
    7312 {
    7313  switch(m_Type)
    7314  {
    7315  case ALLOCATION_TYPE_BLOCK:
    7316  return m_BlockAllocation.m_Block->GetMemoryTypeIndex();
    7317  case ALLOCATION_TYPE_DEDICATED:
    7318  return m_DedicatedAllocation.m_MemoryTypeIndex;
    7319  default:
    7320  VMA_ASSERT(0);
    7321  return UINT32_MAX;
    7322  }
    7323 }
    7324 
    7325 void* VmaAllocation_T::GetMappedData() const
    7326 {
    7327  switch(m_Type)
    7328  {
    7329  case ALLOCATION_TYPE_BLOCK:
    7330  if(m_MapCount != 0)
    7331  {
    7332  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
    7333  VMA_ASSERT(pBlockData != VMA_NULL);
    7334  return (char*)pBlockData + m_BlockAllocation.m_Offset;
    7335  }
    7336  else
    7337  {
    7338  return VMA_NULL;
    7339  }
    7340  break;
    7341  case ALLOCATION_TYPE_DEDICATED:
    7342  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
    7343  return m_DedicatedAllocation.m_pMappedData;
    7344  default:
    7345  VMA_ASSERT(0);
    7346  return VMA_NULL;
    7347  }
    7348 }
    7349 
    7350 bool VmaAllocation_T::CanBecomeLost() const
    7351 {
    7352  switch(m_Type)
    7353  {
    7354  case ALLOCATION_TYPE_BLOCK:
    7355  return m_BlockAllocation.m_CanBecomeLost;
    7356  case ALLOCATION_TYPE_DEDICATED:
    7357  return false;
    7358  default:
    7359  VMA_ASSERT(0);
    7360  return false;
    7361  }
    7362 }
    7363 
    7364 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    7365 {
    7366  VMA_ASSERT(CanBecomeLost());
    7367 
    7368  /*
    7369  Warning: This is a carefully designed algorithm.
    7370  Do not modify unless you really know what you're doing :)
    7371  */
    7372  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
    7373  for(;;)
    7374  {
    7375  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    7376  {
    7377  VMA_ASSERT(0);
    7378  return false;
    7379  }
    7380  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
    7381  {
    7382  return false;
    7383  }
    7384  else // Last use time earlier than current time.
    7385  {
    7386  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
    7387  {
    7388  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
    7389  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
    7390  return true;
    7391  }
    7392  }
    7393  }
    7394 }
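
/*
Sketch of the pattern above (explanatory, not from the original source):
MakeLost() is a compare-exchange retry loop. Each iteration re-examines the
observed last-use frame index, bails out if the allocation is already lost or
was used within the last frameInUseCount frames, and otherwise tries to
atomically swap in VMA_FRAME_INDEX_LOST. A failed exchange means another
thread updated the index concurrently; CompareExchangeLastUseFrameIndex()
refreshes localLastUseFrameIndex and the loop re-evaluates from scratch.
*/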
    7395 
    7396 #if VMA_STATS_STRING_ENABLED
    7397 
    7398 // Names correspond to the values of enum VmaSuballocationType.
    7399 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
    7400  "FREE",
    7401  "UNKNOWN",
    7402  "BUFFER",
    7403  "IMAGE_UNKNOWN",
    7404  "IMAGE_LINEAR",
    7405  "IMAGE_OPTIMAL",
    7406 };
    7407 
    7408 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
    7409 {
    7410  json.WriteString("Type");
    7411  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
    7412 
    7413  json.WriteString("Size");
    7414  json.WriteNumber(m_Size);
    7415 
    7416  if(m_pUserData != VMA_NULL)
    7417  {
    7418  json.WriteString("UserData");
    7419  if(IsUserDataString())
    7420  {
    7421  json.WriteString((const char*)m_pUserData);
    7422  }
    7423  else
    7424  {
    7425  json.BeginString();
    7426  json.ContinueString_Pointer(m_pUserData);
    7427  json.EndString();
    7428  }
    7429  }
    7430 
    7431  json.WriteString("CreationFrameIndex");
    7432  json.WriteNumber(m_CreationFrameIndex);
    7433 
    7434  json.WriteString("LastUseFrameIndex");
    7435  json.WriteNumber(GetLastUseFrameIndex());
    7436 
    7437  if(m_BufferImageUsage != 0)
    7438  {
    7439  json.WriteString("Usage");
    7440  json.WriteNumber(m_BufferImageUsage);
    7441  }
    7442 }
    7443 
    7444 #endif
    7445 
    7446 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
    7447 {
    7448  VMA_ASSERT(IsUserDataString());
    7449  if(m_pUserData != VMA_NULL)
    7450  {
    7451  char* const oldStr = (char*)m_pUserData;
    7452  const size_t oldStrLen = strlen(oldStr);
    7453  vma_delete_array(hAllocator, oldStr, oldStrLen + 1);
    7454  m_pUserData = VMA_NULL;
    7455  }
    7456 }
    7457 
    7458 void VmaAllocation_T::BlockAllocMap()
    7459 {
    7460  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7461 
    7462  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7463  {
    7464  ++m_MapCount;
    7465  }
    7466  else
    7467  {
    7468  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
    7469  }
    7470 }
    7471 
    7472 void VmaAllocation_T::BlockAllocUnmap()
    7473 {
    7474  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
    7475 
    7476  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7477  {
    7478  --m_MapCount;
    7479  }
    7480  else
    7481  {
    7482  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
    7483  }
    7484 }
    7485 
    7486 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
    7487 {
    7488  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7489 
    7490  if(m_MapCount != 0)
    7491  {
    7492  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
    7493  {
    7494  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
    7495  *ppData = m_DedicatedAllocation.m_pMappedData;
    7496  ++m_MapCount;
    7497  return VK_SUCCESS;
    7498  }
    7499  else
    7500  {
    7501  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
    7502  return VK_ERROR_MEMORY_MAP_FAILED;
    7503  }
    7504  }
    7505  else
    7506  {
    7507  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    7508  hAllocator->m_hDevice,
    7509  m_DedicatedAllocation.m_hMemory,
    7510  0, // offset
    7511  VK_WHOLE_SIZE,
    7512  0, // flags
    7513  ppData);
    7514  if(result == VK_SUCCESS)
    7515  {
    7516  m_DedicatedAllocation.m_pMappedData = *ppData;
    7517  m_MapCount = 1;
    7518  }
    7519  return result;
    7520  }
    7521 }
    7522 
    7523 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
    7524 {
    7525  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
    7526 
    7527  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
    7528  {
    7529  --m_MapCount;
    7530  if(m_MapCount == 0)
    7531  {
    7532  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
    7533  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
    7534  hAllocator->m_hDevice,
    7535  m_DedicatedAllocation.m_hMemory);
    7536  }
    7537  }
    7538  else
    7539  {
    7540  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
    7541  }
    7542 }
    7543 
    7544 #if VMA_STATS_STRING_ENABLED
    7545 
    7546 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
    7547 {
    7548  json.BeginObject();
    7549 
    7550  json.WriteString("Blocks");
    7551  json.WriteNumber(stat.blockCount);
    7552 
    7553  json.WriteString("Allocations");
    7554  json.WriteNumber(stat.allocationCount);
    7555 
    7556  json.WriteString("UnusedRanges");
    7557  json.WriteNumber(stat.unusedRangeCount);
    7558 
    7559  json.WriteString("UsedBytes");
    7560  json.WriteNumber(stat.usedBytes);
    7561 
    7562  json.WriteString("UnusedBytes");
    7563  json.WriteNumber(stat.unusedBytes);
    7564 
    7565  if(stat.allocationCount > 1)
    7566  {
    7567  json.WriteString("AllocationSize");
    7568  json.BeginObject(true);
    7569  json.WriteString("Min");
    7570  json.WriteNumber(stat.allocationSizeMin);
    7571  json.WriteString("Avg");
    7572  json.WriteNumber(stat.allocationSizeAvg);
    7573  json.WriteString("Max");
    7574  json.WriteNumber(stat.allocationSizeMax);
    7575  json.EndObject();
    7576  }
    7577 
    7578  if(stat.unusedRangeCount > 1)
    7579  {
    7580  json.WriteString("UnusedRangeSize");
    7581  json.BeginObject(true);
    7582  json.WriteString("Min");
    7583  json.WriteNumber(stat.unusedRangeSizeMin);
    7584  json.WriteString("Avg");
    7585  json.WriteNumber(stat.unusedRangeSizeAvg);
    7586  json.WriteString("Max");
    7587  json.WriteNumber(stat.unusedRangeSizeMax);
    7588  json.EndObject();
    7589  }
    7590 
    7591  json.EndObject();
    7592 }
    7593 
    7594 #endif // #if VMA_STATS_STRING_ENABLED
    7595 
    7596 struct VmaSuballocationItemSizeLess
    7597 {
    7598  bool operator()(
    7599  const VmaSuballocationList::iterator lhs,
    7600  const VmaSuballocationList::iterator rhs) const
    7601  {
    7602  return lhs->size < rhs->size;
    7603  }
    7604  bool operator()(
    7605  const VmaSuballocationList::iterator lhs,
    7606  VkDeviceSize rhsSize) const
    7607  {
    7608  return lhs->size < rhsSize;
    7609  }
    7610 };
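
The two overloads let the same functor order iterator/iterator pairs (for sorted insertion into m_FreeSuballocationsBySize) and iterator/size pairs (for binary searching by a raw size key). VmaBinaryFindFirstNotLess, defined earlier in this header, behaves like std::lower_bound; a sketch of the same lookup using the standard library, with illustrative types rather than the library's:

#include <algorithm>
#include <cstdint>
#include <list>
#include <vector>

struct Suballoc { uint64_t size; };
using SuballocList = std::list<Suballoc>;

// Find the first registered free suballocation whose size is >= wantedSize,
// or return nullptr if none is large enough.
SuballocList::iterator* FindFirstNotLess(std::vector<SuballocList::iterator>& bySize,
                                         uint64_t wantedSize)
{
    auto it = std::lower_bound(bySize.begin(), bySize.end(), wantedSize,
        [](SuballocList::iterator lhs, uint64_t rhs) { return lhs->size < rhs; });
    return it != bySize.end() ? &*it : nullptr;
}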
    7611 
    7612 
    7613 ////////////////////////////////////////////////////////////////////////////////
    7614 // class VmaBlockMetadata
    7615 
    7616 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
    7617  m_Size(0),
    7618  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
    7619 {
    7620 }
    7621 
    7622 #if VMA_STATS_STRING_ENABLED
    7623 
    7624 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
    7625  VkDeviceSize unusedBytes,
    7626  size_t allocationCount,
    7627  size_t unusedRangeCount) const
    7628 {
    7629  json.BeginObject();
    7630 
    7631  json.WriteString("TotalBytes");
    7632  json.WriteNumber(GetSize());
    7633 
    7634  json.WriteString("UnusedBytes");
    7635  json.WriteNumber(unusedBytes);
    7636 
    7637  json.WriteString("Allocations");
    7638  json.WriteNumber((uint64_t)allocationCount);
    7639 
    7640  json.WriteString("UnusedRanges");
    7641  json.WriteNumber((uint64_t)unusedRangeCount);
    7642 
    7643  json.WriteString("Suballocations");
    7644  json.BeginArray();
    7645 }
    7646 
    7647 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
    7648  VkDeviceSize offset,
    7649  VmaAllocation hAllocation) const
    7650 {
    7651  json.BeginObject(true);
    7652 
    7653  json.WriteString("Offset");
    7654  json.WriteNumber(offset);
    7655 
    7656  hAllocation->PrintParameters(json);
    7657 
    7658  json.EndObject();
    7659 }
    7660 
    7661 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
    7662  VkDeviceSize offset,
    7663  VkDeviceSize size) const
    7664 {
    7665  json.BeginObject(true);
    7666 
    7667  json.WriteString("Offset");
    7668  json.WriteNumber(offset);
    7669 
    7670  json.WriteString("Type");
    7671  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
    7672 
    7673  json.WriteString("Size");
    7674  json.WriteNumber(size);
    7675 
    7676  json.EndObject();
    7677 }
    7678 
    7679 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
    7680 {
    7681  json.EndArray();
    7682  json.EndObject();
    7683 }
    7684 
    7685 #endif // #if VMA_STATS_STRING_ENABLED
    7686 
    7687 ////////////////////////////////////////////////////////////////////////////////
    7688 // class VmaBlockMetadata_Generic
    7689 
    7690 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
    7691  VmaBlockMetadata(hAllocator),
    7692  m_FreeCount(0),
    7693  m_SumFreeSize(0),
    7694  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    7695  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
    7696 {
    7697 }
    7698 
    7699 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
    7700 {
    7701 }
    7702 
    7703 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
    7704 {
    7705  VmaBlockMetadata::Init(size);
    7706 
    7707  m_FreeCount = 1;
    7708  m_SumFreeSize = size;
    7709 
    7710  VmaSuballocation suballoc = {};
    7711  suballoc.offset = 0;
    7712  suballoc.size = size;
    7713  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    7714  suballoc.hAllocation = VK_NULL_HANDLE;
    7715 
    7716  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    7717  m_Suballocations.push_back(suballoc);
    7718  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
    7719  --suballocItem;
    7720  m_FreeSuballocationsBySize.push_back(suballocItem);
    7721 }
    7722 
    7723 bool VmaBlockMetadata_Generic::Validate() const
    7724 {
    7725  VMA_VALIDATE(!m_Suballocations.empty());
    7726 
    7727  // Expected offset of new suballocation as calculated from previous ones.
    7728  VkDeviceSize calculatedOffset = 0;
    7729  // Expected number of free suballocations as calculated from traversing their list.
    7730  uint32_t calculatedFreeCount = 0;
    7731  // Expected sum size of free suballocations as calculated from traversing their list.
    7732  VkDeviceSize calculatedSumFreeSize = 0;
    7733  // Expected number of free suballocations that should be registered in
    7734  // m_FreeSuballocationsBySize calculated from traversing their list.
    7735  size_t freeSuballocationsToRegister = 0;
    7736  // True if previous visited suballocation was free.
    7737  bool prevFree = false;
    7738 
    7739  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7740  suballocItem != m_Suballocations.cend();
    7741  ++suballocItem)
    7742  {
    7743  const VmaSuballocation& subAlloc = *suballocItem;
    7744 
    7745  // Actual offset of this suballocation doesn't match expected one.
    7746  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
    7747 
    7748  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
    7749  // Two adjacent free suballocations are invalid. They should be merged.
    7750  VMA_VALIDATE(!prevFree || !currFree);
    7751 
    7752  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
    7753 
    7754  if(currFree)
    7755  {
    7756  calculatedSumFreeSize += subAlloc.size;
    7757  ++calculatedFreeCount;
    7758  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    7759  {
    7760  ++freeSuballocationsToRegister;
    7761  }
    7762 
    7763  // Margin required between allocations - every free space must be at least that large.
    7764  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
    7765  }
    7766  else
    7767  {
    7768  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
    7769  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
    7770 
    7771  // Margin required between allocations - previous allocation must be free.
    7772  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
    7773  }
    7774 
    7775  calculatedOffset += subAlloc.size;
    7776  prevFree = currFree;
    7777  }
    7778 
    7779  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
    7780  // match expected one.
    7781  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
    7782 
    7783  VkDeviceSize lastSize = 0;
    7784  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
    7785  {
    7786  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
    7787 
    7788  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
    7789  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    7790  // They must be sorted by size ascending.
    7791  VMA_VALIDATE(suballocItem->size >= lastSize);
    7792 
    7793  lastSize = suballocItem->size;
    7794  }
    7795 
    7796  // Check if totals match calculated values.
    7797  VMA_VALIDATE(ValidateFreeSuballocationList());
    7798  VMA_VALIDATE(calculatedOffset == GetSize());
    7799  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
    7800  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
    7801 
    7802  return true;
    7803 }
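
Each VMA_VALIDATE above both asserts and returns false, so Validate() stops at the first broken invariant instead of running through the rest. The macro is defined earlier in this header, approximately as:

#define VMA_VALIDATE(cond) do { if(!(cond)) { \
        VMA_ASSERT(0 && "Validation failed: " #cond); \
        return false; \
    } } while(false)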
    7804 
    7805 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
    7806 {
    7807  if(!m_FreeSuballocationsBySize.empty())
    7808  {
    7809  return m_FreeSuballocationsBySize.back()->size;
    7810  }
    7811  else
    7812  {
    7813  return 0;
    7814  }
    7815 }
    7816 
    7817 bool VmaBlockMetadata_Generic::IsEmpty() const
    7818 {
    7819  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
    7820 }
    7821 
    7822 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    7823 {
    7824  outInfo.blockCount = 1;
    7825 
    7826  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7827  outInfo.allocationCount = rangeCount - m_FreeCount;
    7828  outInfo.unusedRangeCount = m_FreeCount;
    7829 
    7830  outInfo.unusedBytes = m_SumFreeSize;
    7831  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
    7832 
    7833  outInfo.allocationSizeMin = UINT64_MAX;
    7834  outInfo.allocationSizeMax = 0;
    7835  outInfo.unusedRangeSizeMin = UINT64_MAX;
    7836  outInfo.unusedRangeSizeMax = 0;
    7837 
    7838  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7839  suballocItem != m_Suballocations.cend();
    7840  ++suballocItem)
    7841  {
    7842  const VmaSuballocation& suballoc = *suballocItem;
    7843  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    7844  {
    7845  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
    7846  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    7847  }
    7848  else
    7849  {
    7850  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
    7851  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
    7852  }
    7853  }
    7854 }
    7855 
    7856 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
    7857 {
    7858  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
    7859 
    7860  inoutStats.size += GetSize();
    7861  inoutStats.unusedSize += m_SumFreeSize;
    7862  inoutStats.allocationCount += rangeCount - m_FreeCount;
    7863  inoutStats.unusedRangeCount += m_FreeCount;
    7864  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    7865 }
    7866 
    7867 #if VMA_STATS_STRING_ENABLED
    7868 
    7869 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
    7870 {
    7871  PrintDetailedMap_Begin(json,
    7872  m_SumFreeSize, // unusedBytes
    7873  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
    7874  m_FreeCount); // unusedRangeCount
    7875 
    7876  size_t i = 0;
    7877  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
    7878  suballocItem != m_Suballocations.cend();
    7879  ++suballocItem, ++i)
    7880  {
    7881  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    7882  {
    7883  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
    7884  }
    7885  else
    7886  {
    7887  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
    7888  }
    7889  }
    7890 
    7891  PrintDetailedMap_End(json);
    7892 }
    7893 
    7894 #endif // #if VMA_STATS_STRING_ENABLED
    7895 
    7896 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
    7897  uint32_t currentFrameIndex,
    7898  uint32_t frameInUseCount,
    7899  VkDeviceSize bufferImageGranularity,
    7900  VkDeviceSize allocSize,
    7901  VkDeviceSize allocAlignment,
    7902  bool upperAddress,
    7903  VmaSuballocationType allocType,
    7904  bool canMakeOtherLost,
    7905  uint32_t strategy,
    7906  VmaAllocationRequest* pAllocationRequest)
    7907 {
    7908  VMA_ASSERT(allocSize > 0);
    7909  VMA_ASSERT(!upperAddress);
    7910  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    7911  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    7912  VMA_HEAVY_ASSERT(Validate());
    7913 
    7914  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    7915 
    7916  // There is not enough total free space in this block to fulfill the request: Early return.
    7917  if(canMakeOtherLost == false &&
    7918  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
    7919  {
    7920  return false;
    7921  }
    7922 
    7923  // New algorithm, efficiently searching freeSuballocationsBySize.
    7924  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
    7925  if(freeSuballocCount > 0)
    7926  {
    7927  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    7928  {
    7929  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
    7930  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    7931  m_FreeSuballocationsBySize.data(),
    7932  m_FreeSuballocationsBySize.data() + freeSuballocCount,
    7933  allocSize + 2 * VMA_DEBUG_MARGIN,
    7934  VmaSuballocationItemSizeLess());
    7935  size_t index = it - m_FreeSuballocationsBySize.data();
    7936  for(; index < freeSuballocCount; ++index)
    7937  {
    7938  if(CheckAllocation(
    7939  currentFrameIndex,
    7940  frameInUseCount,
    7941  bufferImageGranularity,
    7942  allocSize,
    7943  allocAlignment,
    7944  allocType,
    7945  m_FreeSuballocationsBySize[index],
    7946  false, // canMakeOtherLost
    7947  &pAllocationRequest->offset,
    7948  &pAllocationRequest->itemsToMakeLostCount,
    7949  &pAllocationRequest->sumFreeSize,
    7950  &pAllocationRequest->sumItemSize))
    7951  {
    7952  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    7953  return true;
    7954  }
    7955  }
    7956  }
    7957  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
    7958  {
    7959  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    7960  it != m_Suballocations.end();
    7961  ++it)
    7962  {
    7963  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
    7964  currentFrameIndex,
    7965  frameInUseCount,
    7966  bufferImageGranularity,
    7967  allocSize,
    7968  allocAlignment,
    7969  allocType,
    7970  it,
    7971  false, // canMakeOtherLost
    7972  &pAllocationRequest->offset,
    7973  &pAllocationRequest->itemsToMakeLostCount,
    7974  &pAllocationRequest->sumFreeSize,
    7975  &pAllocationRequest->sumItemSize))
    7976  {
    7977  pAllocationRequest->item = it;
    7978  return true;
    7979  }
    7980  }
    7981  }
    7982  else // WORST_FIT, FIRST_FIT
    7983  {
    7984  // Search starting from the biggest suballocations.
    7985  for(size_t index = freeSuballocCount; index--; )
    7986  {
    7987  if(CheckAllocation(
    7988  currentFrameIndex,
    7989  frameInUseCount,
    7990  bufferImageGranularity,
    7991  allocSize,
    7992  allocAlignment,
    7993  allocType,
    7994  m_FreeSuballocationsBySize[index],
    7995  false, // canMakeOtherLost
    7996  &pAllocationRequest->offset,
    7997  &pAllocationRequest->itemsToMakeLostCount,
    7998  &pAllocationRequest->sumFreeSize,
    7999  &pAllocationRequest->sumItemSize))
    8000  {
    8001  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
    8002  return true;
    8003  }
    8004  }
    8005  }
    8006  }
    8007 
    8008  if(canMakeOtherLost)
    8009  {
    8010  // Brute-force algorithm. TODO: Come up with something better.
    8011 
    8012  bool found = false;
    8013  VmaAllocationRequest tmpAllocRequest = {};
    8014  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
    8015  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
    8016  suballocIt != m_Suballocations.end();
    8017  ++suballocIt)
    8018  {
    8019  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
    8020  suballocIt->hAllocation->CanBecomeLost())
    8021  {
    8022  if(CheckAllocation(
    8023  currentFrameIndex,
    8024  frameInUseCount,
    8025  bufferImageGranularity,
    8026  allocSize,
    8027  allocAlignment,
    8028  allocType,
    8029  suballocIt,
    8030  canMakeOtherLost,
    8031  &tmpAllocRequest.offset,
    8032  &tmpAllocRequest.itemsToMakeLostCount,
    8033  &tmpAllocRequest.sumFreeSize,
    8034  &tmpAllocRequest.sumItemSize))
    8035  {
    8036  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    8037  {
    8038  *pAllocationRequest = tmpAllocRequest;
    8039  pAllocationRequest->item = suballocIt;
    8040  break;
    8041  }
    8042  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
    8043  {
    8044  *pAllocationRequest = tmpAllocRequest;
    8045  pAllocationRequest->item = suballocIt;
    8046  found = true;
    8047  }
    8048  }
    8049  }
    8050  }
    8051 
    8052  return found;
    8053  }
    8054 
    8055  return false;
    8056 }
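
In the canMakeOtherLost fallback, the candidate with the smallest CalcCost() wins unless FIRST_FIT short-circuits the search. The cost weighs the bytes of live allocations that would be sacrificed plus a fixed penalty per lost allocation; a sketch of that computation (the penalty constant is an assumption here, the real one is defined elsewhere in this header):

#include <cstddef>
#include <cstdint>

// Sketch of the cost used to rank candidates above.
uint64_t CalcCostSketch(uint64_t sumItemSize, size_t itemsToMakeLostCount)
{
    const uint64_t kLostAllocationCost = 1048576; // assumed 1 MiB penalty per lost allocation
    return sumItemSize + (uint64_t)itemsToMakeLostCount * kLostAllocationCost;
}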
    8057 
    8058 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
    8059  uint32_t currentFrameIndex,
    8060  uint32_t frameInUseCount,
    8061  VmaAllocationRequest* pAllocationRequest)
    8062 {
    8063  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
    8064 
    8065  while(pAllocationRequest->itemsToMakeLostCount > 0)
    8066  {
    8067  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
    8068  {
    8069  ++pAllocationRequest->item;
    8070  }
    8071  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8072  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
    8073  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
    8074  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8075  {
    8076  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
    8077  --pAllocationRequest->itemsToMakeLostCount;
    8078  }
    8079  else
    8080  {
    8081  return false;
    8082  }
    8083  }
    8084 
    8085  VMA_HEAVY_ASSERT(Validate());
    8086  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
    8087  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8088 
    8089  return true;
    8090 }
    8091 
    8092 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    8093 {
    8094  uint32_t lostAllocationCount = 0;
    8095  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8096  it != m_Suballocations.end();
    8097  ++it)
    8098  {
    8099  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
    8100  it->hAllocation->CanBecomeLost() &&
    8101  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    8102  {
    8103  it = FreeSuballocation(it);
    8104  ++lostAllocationCount;
    8105  }
    8106  }
    8107  return lostAllocationCount;
    8108 }
    8109 
    8110 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
    8111 {
    8112  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
    8113  it != m_Suballocations.end();
    8114  ++it)
    8115  {
    8116  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    8117  {
    8118  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
    8119  {
    8120  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    8121  return VK_ERROR_VALIDATION_FAILED_EXT;
    8122  }
    8123  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
    8124  {
    8125  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    8126  return VK_ERROR_VALIDATION_FAILED_EXT;
    8127  }
    8128  }
    8129  }
    8130 
    8131  return VK_SUCCESS;
    8132 }
    8133 
    8134 void VmaBlockMetadata_Generic::Alloc(
    8135  const VmaAllocationRequest& request,
    8136  VmaSuballocationType type,
    8137  VkDeviceSize allocSize,
    8138  VmaAllocation hAllocation)
    8139 {
    8140  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    8141  VMA_ASSERT(request.item != m_Suballocations.end());
    8142  VmaSuballocation& suballoc = *request.item;
    8143  // Given suballocation is a free block.
    8144  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8145  // Given offset is inside this suballocation.
    8146  VMA_ASSERT(request.offset >= suballoc.offset);
    8147  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
    8148  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
    8149  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
    8150 
    8151  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
    8152  // it to become used.
    8153  UnregisterFreeSuballocation(request.item);
    8154 
    8155  suballoc.offset = request.offset;
    8156  suballoc.size = allocSize;
    8157  suballoc.type = type;
    8158  suballoc.hAllocation = hAllocation;
    8159 
    8160  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
    8161  if(paddingEnd)
    8162  {
    8163  VmaSuballocation paddingSuballoc = {};
    8164  paddingSuballoc.offset = request.offset + allocSize;
    8165  paddingSuballoc.size = paddingEnd;
    8166  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8167  VmaSuballocationList::iterator next = request.item;
    8168  ++next;
    8169  const VmaSuballocationList::iterator paddingEndItem =
    8170  m_Suballocations.insert(next, paddingSuballoc);
    8171  RegisterFreeSuballocation(paddingEndItem);
    8172  }
    8173 
    8174  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
    8175  if(paddingBegin)
    8176  {
    8177  VmaSuballocation paddingSuballoc = {};
    8178  paddingSuballoc.offset = request.offset - paddingBegin;
    8179  paddingSuballoc.size = paddingBegin;
    8180  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8181  const VmaSuballocationList::iterator paddingBeginItem =
    8182  m_Suballocations.insert(request.item, paddingSuballoc);
    8183  RegisterFreeSuballocation(paddingBeginItem);
    8184  }
    8185 
    8186  // Update totals.
    8187  m_FreeCount = m_FreeCount - 1;
    8188  if(paddingBegin > 0)
    8189  {
    8190  ++m_FreeCount;
    8191  }
    8192  if(paddingEnd > 0)
    8193  {
    8194  ++m_FreeCount;
    8195  }
    8196  m_SumFreeSize -= allocSize;
    8197 }
    8198 
    8199 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
    8200 {
    8201  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8202  suballocItem != m_Suballocations.end();
    8203  ++suballocItem)
    8204  {
    8205  VmaSuballocation& suballoc = *suballocItem;
    8206  if(suballoc.hAllocation == allocation)
    8207  {
    8208  FreeSuballocation(suballocItem);
    8209  VMA_HEAVY_ASSERT(Validate());
    8210  return;
    8211  }
    8212  }
    8213  VMA_ASSERT(0 && "Not found!");
    8214 }
    8215 
    8216 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
    8217 {
    8218  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
    8219  suballocItem != m_Suballocations.end();
    8220  ++suballocItem)
    8221  {
    8222  VmaSuballocation& suballoc = *suballocItem;
    8223  if(suballoc.offset == offset)
    8224  {
    8225  FreeSuballocation(suballocItem);
    8226  return;
    8227  }
    8228  }
    8229  VMA_ASSERT(0 && "Not found!");
    8230 }
    8231 
    8232 bool VmaBlockMetadata_Generic::ResizeAllocation(const VmaAllocation alloc, VkDeviceSize newSize)
    8233 {
    8234  typedef VmaSuballocationList::iterator iter_type;
    8235  for(iter_type suballocItem = m_Suballocations.begin();
    8236  suballocItem != m_Suballocations.end();
    8237  ++suballocItem)
    8238  {
    8239  VmaSuballocation& suballoc = *suballocItem;
    8240  if(suballoc.hAllocation == alloc)
    8241  {
    8242  iter_type nextItem = suballocItem;
    8243  ++nextItem;
    8244 
    8245  // Should have been ensured on higher level.
    8246  VMA_ASSERT(newSize != alloc->GetSize() && newSize > 0);
    8247 
    8248  // Shrinking.
    8249  if(newSize < alloc->GetSize())
    8250  {
    8251  const VkDeviceSize sizeDiff = suballoc.size - newSize;
    8252 
    8253  // There is next item.
    8254  if(nextItem != m_Suballocations.end())
    8255  {
    8256  // Next item is free.
    8257  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8258  {
    8259  // Grow this next item backward.
    8260  UnregisterFreeSuballocation(nextItem);
    8261  nextItem->offset -= sizeDiff;
    8262  nextItem->size += sizeDiff;
    8263  RegisterFreeSuballocation(nextItem);
    8264  }
    8265  // Next item is not free.
    8266  else
    8267  {
    8268  // Create free item after current one.
    8269  VmaSuballocation newFreeSuballoc;
    8270  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8271  newFreeSuballoc.offset = suballoc.offset + newSize;
    8272  newFreeSuballoc.size = sizeDiff;
    8273  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8274  iter_type newFreeSuballocIt = m_Suballocations.insert(nextItem, newFreeSuballoc);
    8275  RegisterFreeSuballocation(newFreeSuballocIt);
    8276 
    8277  ++m_FreeCount;
    8278  }
    8279  }
    8280  // This is the last item.
    8281  else
    8282  {
    8283  // Create free item at the end.
    8284  VmaSuballocation newFreeSuballoc;
    8285  newFreeSuballoc.hAllocation = VK_NULL_HANDLE;
    8286  newFreeSuballoc.offset = suballoc.offset + newSize;
    8287  newFreeSuballoc.size = sizeDiff;
    8288  newFreeSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8289  m_Suballocations.push_back(newFreeSuballoc);
    8290 
    8291  iter_type newFreeSuballocIt = m_Suballocations.end();
    8292  RegisterFreeSuballocation(--newFreeSuballocIt);
    8293 
    8294  ++m_FreeCount;
    8295  }
    8296 
    8297  suballoc.size = newSize;
    8298  m_SumFreeSize += sizeDiff;
    8299  }
    8300  // Growing.
    8301  else
    8302  {
    8303  const VkDeviceSize sizeDiff = newSize - suballoc.size;
    8304 
    8305  // There is next item.
    8306  if(nextItem != m_Suballocations.end())
    8307  {
    8308  // Next item is free.
    8309  if(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8310  {
    8311  // There is not enough free space, including margin.
    8312  if(nextItem->size < sizeDiff + VMA_DEBUG_MARGIN)
    8313  {
    8314  return false;
    8315  }
    8316 
    8317  // There is more free space than required.
    8318  if(nextItem->size > sizeDiff)
    8319  {
    8320  // Move and shrink this next item.
    8321  UnregisterFreeSuballocation(nextItem);
    8322  nextItem->offset += sizeDiff;
    8323  nextItem->size -= sizeDiff;
    8324  RegisterFreeSuballocation(nextItem);
    8325  }
    8326  // There is exactly the amount of free space required.
    8327  else
    8328  {
    8329  // Remove this next free item.
    8330  UnregisterFreeSuballocation(nextItem);
    8331  m_Suballocations.erase(nextItem);
    8332  --m_FreeCount;
    8333  }
    8334  }
    8335  // Next item is not free - there is no space to grow.
    8336  else
    8337  {
    8338  return false;
    8339  }
    8340  }
    8341  // This is the last item - there is no space to grow.
    8342  else
    8343  {
    8344  return false;
    8345  }
    8346 
    8347  suballoc.size = newSize;
    8348  m_SumFreeSize -= sizeDiff;
    8349  }
    8350 
    8351  // We cannot call Validate() here because the alloc object is updated to its new size outside of this call.
    8352  return true;
    8353  }
    8354  }
    8355  VMA_ASSERT(0 && "Not found!");
    8356  return false;
    8357 }
    8358 
    8359 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
    8360 {
    8361  VkDeviceSize lastSize = 0;
    8362  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
    8363  {
    8364  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
    8365 
    8366  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
    8367  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
    8368  VMA_VALIDATE(it->size >= lastSize);
    8369  lastSize = it->size;
    8370  }
    8371  return true;
    8372 }
    8373 
    8374 bool VmaBlockMetadata_Generic::CheckAllocation(
    8375  uint32_t currentFrameIndex,
    8376  uint32_t frameInUseCount,
    8377  VkDeviceSize bufferImageGranularity,
    8378  VkDeviceSize allocSize,
    8379  VkDeviceSize allocAlignment,
    8380  VmaSuballocationType allocType,
    8381  VmaSuballocationList::const_iterator suballocItem,
    8382  bool canMakeOtherLost,
    8383  VkDeviceSize* pOffset,
    8384  size_t* itemsToMakeLostCount,
    8385  VkDeviceSize* pSumFreeSize,
    8386  VkDeviceSize* pSumItemSize) const
    8387 {
    8388  VMA_ASSERT(allocSize > 0);
    8389  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    8390  VMA_ASSERT(suballocItem != m_Suballocations.cend());
    8391  VMA_ASSERT(pOffset != VMA_NULL);
    8392 
    8393  *itemsToMakeLostCount = 0;
    8394  *pSumFreeSize = 0;
    8395  *pSumItemSize = 0;
    8396 
    8397  if(canMakeOtherLost)
    8398  {
    8399  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8400  {
    8401  *pSumFreeSize = suballocItem->size;
    8402  }
    8403  else
    8404  {
    8405  if(suballocItem->hAllocation->CanBecomeLost() &&
    8406  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8407  {
    8408  ++*itemsToMakeLostCount;
    8409  *pSumItemSize = suballocItem->size;
    8410  }
    8411  else
    8412  {
    8413  return false;
    8414  }
    8415  }
    8416 
    8417  // Remaining size is too small for this request: Early return.
    8418  if(GetSize() - suballocItem->offset < allocSize)
    8419  {
    8420  return false;
    8421  }
    8422 
    8423  // Start from offset equal to beginning of this suballocation.
    8424  *pOffset = suballocItem->offset;
    8425 
    8426  // Apply VMA_DEBUG_MARGIN at the beginning.
    8427  if(VMA_DEBUG_MARGIN > 0)
    8428  {
    8429  *pOffset += VMA_DEBUG_MARGIN;
    8430  }
    8431 
    8432  // Apply alignment.
    8433  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8434 
    8435  // Check previous suballocations for BufferImageGranularity conflicts.
    8436  // Make bigger alignment if necessary.
    8437  if(bufferImageGranularity > 1)
    8438  {
    8439  bool bufferImageGranularityConflict = false;
    8440  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8441  while(prevSuballocItem != m_Suballocations.cbegin())
    8442  {
    8443  --prevSuballocItem;
    8444  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8445  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8446  {
    8447  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8448  {
    8449  bufferImageGranularityConflict = true;
    8450  break;
    8451  }
    8452  }
    8453  else
    8454  // Already on previous page.
    8455  break;
    8456  }
    8457  if(bufferImageGranularityConflict)
    8458  {
    8459  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8460  }
    8461  }
    8462 
    8463  // Now that we have final *pOffset, check if we are past suballocItem.
    8464  // If yes, return false - this function should be called for another suballocItem as starting point.
    8465  if(*pOffset >= suballocItem->offset + suballocItem->size)
    8466  {
    8467  return false;
    8468  }
    8469 
    8470  // Calculate padding at the beginning based on current offset.
    8471  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
    8472 
    8473  // Calculate required margin at the end.
    8474  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8475 
    8476  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
    8477  // Another early return check.
    8478  if(suballocItem->offset + totalSize > GetSize())
    8479  {
    8480  return false;
    8481  }
    8482 
    8483  // Advance lastSuballocItem until desired size is reached.
    8484  // Update itemsToMakeLostCount.
    8485  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
    8486  if(totalSize > suballocItem->size)
    8487  {
    8488  VkDeviceSize remainingSize = totalSize - suballocItem->size;
    8489  while(remainingSize > 0)
    8490  {
    8491  ++lastSuballocItem;
    8492  if(lastSuballocItem == m_Suballocations.cend())
    8493  {
    8494  return false;
    8495  }
    8496  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8497  {
    8498  *pSumFreeSize += lastSuballocItem->size;
    8499  }
    8500  else
    8501  {
    8502  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
    8503  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
    8504  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8505  {
    8506  ++*itemsToMakeLostCount;
    8507  *pSumItemSize += lastSuballocItem->size;
    8508  }
    8509  else
    8510  {
    8511  return false;
    8512  }
    8513  }
    8514  remainingSize = (lastSuballocItem->size < remainingSize) ?
    8515  remainingSize - lastSuballocItem->size : 0;
    8516  }
    8517  }
    8518 
    8519  // Check next suballocations for BufferImageGranularity conflicts.
    8520  // If conflict exists, we must mark more allocations lost or fail.
    8521  if(bufferImageGranularity > 1)
    8522  {
    8523  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
    8524  ++nextSuballocItem;
    8525  while(nextSuballocItem != m_Suballocations.cend())
    8526  {
    8527  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8528  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8529  {
    8530  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8531  {
    8532  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
    8533  if(nextSuballoc.hAllocation->CanBecomeLost() &&
    8534  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    8535  {
    8536  ++*itemsToMakeLostCount;
    8537  }
    8538  else
    8539  {
    8540  return false;
    8541  }
    8542  }
    8543  }
    8544  else
    8545  {
    8546  // Already on next page.
    8547  break;
    8548  }
    8549  ++nextSuballocItem;
    8550  }
    8551  }
    8552  }
    8553  else
    8554  {
    8555  const VmaSuballocation& suballoc = *suballocItem;
    8556  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8557 
    8558  *pSumFreeSize = suballoc.size;
    8559 
    8560  // Size of this suballocation is too small for this request: Early return.
    8561  if(suballoc.size < allocSize)
    8562  {
    8563  return false;
    8564  }
    8565 
    8566  // Start from offset equal to beginning of this suballocation.
    8567  *pOffset = suballoc.offset;
    8568 
    8569  // Apply VMA_DEBUG_MARGIN at the beginning.
    8570  if(VMA_DEBUG_MARGIN > 0)
    8571  {
    8572  *pOffset += VMA_DEBUG_MARGIN;
    8573  }
    8574 
    8575  // Apply alignment.
    8576  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
    8577 
    8578  // Check previous suballocations for BufferImageGranularity conflicts.
    8579  // Make bigger alignment if necessary.
    8580  if(bufferImageGranularity > 1)
    8581  {
    8582  bool bufferImageGranularityConflict = false;
    8583  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
    8584  while(prevSuballocItem != m_Suballocations.cbegin())
    8585  {
    8586  --prevSuballocItem;
    8587  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
    8588  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
    8589  {
    8590  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    8591  {
    8592  bufferImageGranularityConflict = true;
    8593  break;
    8594  }
    8595  }
    8596  else
    8597  // Already on previous page.
    8598  break;
    8599  }
    8600  if(bufferImageGranularityConflict)
    8601  {
    8602  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
    8603  }
    8604  }
    8605 
    8606  // Calculate padding at the beginning based on current offset.
    8607  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
    8608 
    8609  // Calculate required margin at the end.
    8610  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
    8611 
    8612  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
    8613  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
    8614  {
    8615  return false;
    8616  }
    8617 
    8618  // Check next suballocations for BufferImageGranularity conflicts.
    8619  // If conflict exists, allocation cannot be made here.
    8620  if(bufferImageGranularity > 1)
    8621  {
    8622  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
    8623  ++nextSuballocItem;
    8624  while(nextSuballocItem != m_Suballocations.cend())
    8625  {
    8626  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
    8627  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    8628  {
    8629  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    8630  {
    8631  return false;
    8632  }
    8633  }
    8634  else
    8635  {
    8636  // Already on next page.
    8637  break;
    8638  }
    8639  ++nextSuballocItem;
    8640  }
    8641  }
    8642  }
    8643 
    8644  // All tests passed: Success. pOffset is already filled.
    8645  return true;
    8646 }
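
Both branches of CheckAllocation rely on VmaBlocksOnSamePage to decide whether two neighboring resources fall on the same bufferImageGranularity "page", in which case mixing linear and non-linear resources forces extra alignment or a conflict. A standalone sketch of that page test, assuming the granularity is a power of two (which Vulkan guarantees); VkDeviceSize is a local stand-in here:

#include <cstdint>

typedef uint64_t VkDeviceSize; // local stand-in for the Vulkan typedef

// True if the last byte of resource A and the first byte of resource B share
// a page of size bufferImageGranularity. pageSize must be a power of two.
static bool BlocksOnSamePageSketch(VkDeviceSize resourceAOffset, VkDeviceSize resourceASize,
                                   VkDeviceSize resourceBOffset, VkDeviceSize pageSize)
{
    VkDeviceSize resourceAEnd       = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage   = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStartPage = resourceBOffset & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}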
    8647 
    8648 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
    8649 {
    8650  VMA_ASSERT(item != m_Suballocations.end());
    8651  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8652 
    8653  VmaSuballocationList::iterator nextItem = item;
    8654  ++nextItem;
    8655  VMA_ASSERT(nextItem != m_Suballocations.end());
    8656  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
    8657 
    8658  item->size += nextItem->size;
    8659  --m_FreeCount;
    8660  m_Suballocations.erase(nextItem);
    8661 }
    8662 
    8663 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
    8664 {
    8665  // Change this suballocation to be marked as free.
    8666  VmaSuballocation& suballoc = *suballocItem;
    8667  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    8668  suballoc.hAllocation = VK_NULL_HANDLE;
    8669 
    8670  // Update totals.
    8671  ++m_FreeCount;
    8672  m_SumFreeSize += suballoc.size;
    8673 
    8674  // Merge with previous and/or next suballocation if it's also free.
    8675  bool mergeWithNext = false;
    8676  bool mergeWithPrev = false;
    8677 
    8678  VmaSuballocationList::iterator nextItem = suballocItem;
    8679  ++nextItem;
    8680  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
    8681  {
    8682  mergeWithNext = true;
    8683  }
    8684 
    8685  VmaSuballocationList::iterator prevItem = suballocItem;
    8686  if(suballocItem != m_Suballocations.begin())
    8687  {
    8688  --prevItem;
    8689  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
    8690  {
    8691  mergeWithPrev = true;
    8692  }
    8693  }
    8694 
    8695  if(mergeWithNext)
    8696  {
    8697  UnregisterFreeSuballocation(nextItem);
    8698  MergeFreeWithNext(suballocItem);
    8699  }
    8700 
    8701  if(mergeWithPrev)
    8702  {
    8703  UnregisterFreeSuballocation(prevItem);
    8704  MergeFreeWithNext(prevItem);
    8705  RegisterFreeSuballocation(prevItem);
    8706  return prevItem;
    8707  }
    8708  else
    8709  {
    8710  RegisterFreeSuballocation(suballocItem);
    8711  return suballocItem;
    8712  }
    8713 }
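
FreeSuballocation is a classic free-list coalescing step: mark the item free, then merge it with a free successor and/or predecessor so that Validate()'s rule of no two adjacent free items keeps holding. A compact sketch of the same merge on a std::list, with illustrative types rather than the library's:

#include <cstdint>
#include <iterator>
#include <list>

struct Range { uint64_t offset, size; bool free; };

// Mark *it free and merge it with free neighbors. Returns the merged item.
std::list<Range>::iterator Coalesce(std::list<Range>& ranges, std::list<Range>::iterator it)
{
    it->free = true;
    auto next = std::next(it);
    if(next != ranges.end() && next->free)  // merge forward
    {
        it->size += next->size;
        ranges.erase(next);
    }
    if(it != ranges.begin())
    {
        auto prev = std::prev(it);
        if(prev->free)                      // merge backward
        {
            prev->size += it->size;
            ranges.erase(it);
            return prev;
        }
    }
    return it;
}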
    8714 
    8715 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
    8716 {
    8717  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8718  VMA_ASSERT(item->size > 0);
    8719 
    8720  // You may want to enable this validation at the beginning or at the end of
    8721  // this function, depending on what you want to check.
    8722  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8723 
    8724  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8725  {
    8726  if(m_FreeSuballocationsBySize.empty())
    8727  {
    8728  m_FreeSuballocationsBySize.push_back(item);
    8729  }
    8730  else
    8731  {
    8732  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
    8733  }
    8734  }
    8735 
    8736  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8737 }
    8738 
    8739 
    8740 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
    8741 {
    8742  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
    8743  VMA_ASSERT(item->size > 0);
    8744 
    8745  // You may want to enable this validation at the beginning or at the end of
    8746  // this function, depending on what you want to check.
    8747  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8748 
    8749  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    8750  {
    8751  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
    8752  m_FreeSuballocationsBySize.data(),
    8753  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
    8754  item,
    8755  VmaSuballocationItemSizeLess());
    8756  for(size_t index = it - m_FreeSuballocationsBySize.data();
    8757  index < m_FreeSuballocationsBySize.size();
    8758  ++index)
    8759  {
    8760  if(m_FreeSuballocationsBySize[index] == item)
    8761  {
    8762  VmaVectorRemove(m_FreeSuballocationsBySize, index);
    8763  return;
    8764  }
    8765  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
    8766  }
    8767  VMA_ASSERT(0 && "Not found.");
    8768  }
    8769 
    8770  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
    8771 }
    8772 
    8773 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
    8774  VkDeviceSize bufferImageGranularity,
    8775  VmaSuballocationType& inOutPrevSuballocType) const
    8776 {
    8777  if(bufferImageGranularity == 1 || IsEmpty())
    8778  {
    8779  return false;
    8780  }
    8781 
    8782  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
    8783  bool typeConflictFound = false;
    8784  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
    8785  it != m_Suballocations.cend();
    8786  ++it)
    8787  {
    8788  const VmaSuballocationType suballocType = it->type;
    8789  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
    8790  {
    8791  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
    8792  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
    8793  {
    8794  typeConflictFound = true;
    8795  }
    8796  inOutPrevSuballocType = suballocType;
    8797  }
    8798  }
    8799 
    8800  return typeConflictFound || minAlignment >= bufferImageGranularity;
    8801 }
    8802 
    8803 ////////////////////////////////////////////////////////////////////////////////
    8804 // class VmaBlockMetadata_Linear
    8805 
    8806 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
    8807  VmaBlockMetadata(hAllocator),
    8808  m_SumFreeSize(0),
    8809  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8810  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
    8811  m_1stVectorIndex(0),
    8812  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
    8813  m_1stNullItemsBeginCount(0),
    8814  m_1stNullItemsMiddleCount(0),
    8815  m_2ndNullItemsCount(0)
    8816 {
    8817 }
    8818 
    8819 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
    8820 {
    8821 }
    8822 
    8823 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
    8824 {
    8825  VmaBlockMetadata::Init(size);
    8826  m_SumFreeSize = size;
    8827 }
    8828 
    8829 bool VmaBlockMetadata_Linear::Validate() const
    8830 {
    8831  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8832  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    8833 
    8834  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
    8835  VMA_VALIDATE(!suballocations1st.empty() ||
    8836  suballocations2nd.empty() ||
    8837  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
    8838 
    8839  if(!suballocations1st.empty())
    8840  {
    8841  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
    8842  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
    8843  // Null item at the end should be just pop_back().
    8844  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
    8845  }
    8846  if(!suballocations2nd.empty())
    8847  {
    8848  // Null item at the end should be just pop_back().
    8849  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
    8850  }
    8851 
    8852  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
    8853  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
    8854 
    8855  VkDeviceSize sumUsedSize = 0;
    8856  const size_t suballoc1stCount = suballocations1st.size();
    8857  VkDeviceSize offset = VMA_DEBUG_MARGIN;
    8858 
    8859  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    8860  {
    8861  const size_t suballoc2ndCount = suballocations2nd.size();
    8862  size_t nullItem2ndCount = 0;
    8863  for(size_t i = 0; i < suballoc2ndCount; ++i)
    8864  {
    8865  const VmaSuballocation& suballoc = suballocations2nd[i];
    8866  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8867 
    8868  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8869  VMA_VALIDATE(suballoc.offset >= offset);
    8870 
    8871  if(!currFree)
    8872  {
    8873  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8874  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8875  sumUsedSize += suballoc.size;
    8876  }
    8877  else
    8878  {
    8879  ++nullItem2ndCount;
    8880  }
    8881 
    8882  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8883  }
    8884 
    8885  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8886  }
    8887 
    8888  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
    8889  {
    8890  const VmaSuballocation& suballoc = suballocations1st[i];
    8891  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
    8892  suballoc.hAllocation == VK_NULL_HANDLE);
    8893  }
    8894 
    8895  size_t nullItem1stCount = m_1stNullItemsBeginCount;
    8896 
    8897  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
    8898  {
    8899  const VmaSuballocation& suballoc = suballocations1st[i];
    8900  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8901 
    8902  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8903  VMA_VALIDATE(suballoc.offset >= offset);
    8904  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
    8905 
    8906  if(!currFree)
    8907  {
    8908  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8909  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8910  sumUsedSize += suballoc.size;
    8911  }
    8912  else
    8913  {
    8914  ++nullItem1stCount;
    8915  }
    8916 
    8917  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8918  }
    8919  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
    8920 
    8921  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    8922  {
    8923  const size_t suballoc2ndCount = suballocations2nd.size();
    8924  size_t nullItem2ndCount = 0;
    8925  for(size_t i = suballoc2ndCount; i--; )
    8926  {
    8927  const VmaSuballocation& suballoc = suballocations2nd[i];
    8928  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
    8929 
    8930  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
    8931  VMA_VALIDATE(suballoc.offset >= offset);
    8932 
    8933  if(!currFree)
    8934  {
    8935  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
    8936  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
    8937  sumUsedSize += suballoc.size;
    8938  }
    8939  else
    8940  {
    8941  ++nullItem2ndCount;
    8942  }
    8943 
    8944  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
    8945  }
    8946 
    8947  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
    8948  }
    8949 
    8950  VMA_VALIDATE(offset <= GetSize());
    8951  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
    8952 
    8953  return true;
    8954 }
    8955 
    8956 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
    8957 {
    8958  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
    8959  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
    8960 }
    8961 
    8962 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
    8963 {
    8964  const VkDeviceSize size = GetSize();
    8965 
    8966  /*
    8967  We don't consider gaps inside allocation vectors with freed allocations because
    8968  they are not suitable for reuse in a linear allocator. We consider only space that
    8969  is available for new allocations.
    8970  */
    8971  if(IsEmpty())
    8972  {
    8973  return size;
    8974  }
    8975 
    8976  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    8977 
    8978  switch(m_2ndVectorMode)
    8979  {
    8980  case SECOND_VECTOR_EMPTY:
    8981  /*
    8982  Available space is after end of 1st, as well as before beginning of 1st (which
    8983  would make it a ring buffer).
    8984  */
    8985  {
    8986  const size_t suballocations1stCount = suballocations1st.size();
    8987  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
    8988  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    8989  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
    8990  return VMA_MAX(
    8991  firstSuballoc.offset,
    8992  size - (lastSuballoc.offset + lastSuballoc.size));
    8993  }
    8994  break;
    8995 
    8996  case SECOND_VECTOR_RING_BUFFER:
    8997  /*
    8998  Available space is only between end of 2nd and beginning of 1st.
    8999  */
    9000  {
    9001  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9002  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
    9003  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
    9004  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
    9005  }
    9006  break;
    9007 
    9008  case SECOND_VECTOR_DOUBLE_STACK:
    9009  /*
    9010  Available space is only between end of 1st and top of 2nd.
    9011  */
    9012  {
    9013  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9014  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
    9015  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
    9016  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
    9017  }
    9018  break;
    9019 
    9020  default:
    9021  VMA_ASSERT(0);
    9022  return 0;
    9023  }
    9024 }
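
The switch above covers the linear allocator's three shapes: a plain stack (free space at both ends), a ring buffer (free space only in the wrap-around gap), and a double stack growing from both ends toward the middle. A tiny worked example for the ring-buffer case, using assumed numbers:

#include <cassert>
#include <cstdint>

int main()
{
    // Assumed numbers: a block of 100 bytes in SECOND_VECTOR_RING_BUFFER mode.
    // 2nd vector occupies [0, 30), 1st vector occupies [60, 100).
    const uint64_t lastSuballoc2ndOffset = 0, lastSuballoc2ndSize = 30;
    const uint64_t firstSuballoc1stOffset = 60;
    // The only reusable space is the gap between end of 2nd and start of 1st:
    const uint64_t unusedMax =
        firstSuballoc1stOffset - (lastSuballoc2ndOffset + lastSuballoc2ndSize);
    assert(unusedMax == 30);
    return 0;
}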
    9025 
    9026 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    9027 {
    9028  const VkDeviceSize size = GetSize();
    9029  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9030  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9031  const size_t suballoc1stCount = suballocations1st.size();
    9032  const size_t suballoc2ndCount = suballocations2nd.size();
    9033 
    9034  outInfo.blockCount = 1;
    9035  outInfo.allocationCount = (uint32_t)GetAllocationCount();
    9036  outInfo.unusedRangeCount = 0;
    9037  outInfo.usedBytes = 0;
    9038  outInfo.allocationSizeMin = UINT64_MAX;
    9039  outInfo.allocationSizeMax = 0;
    9040  outInfo.unusedRangeSizeMin = UINT64_MAX;
    9041  outInfo.unusedRangeSizeMax = 0;
    9042 
    9043  VkDeviceSize lastOffset = 0;
    9044 
    9045  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9046  {
    9047  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9048  size_t nextAlloc2ndIndex = 0;
    9049  while(lastOffset < freeSpace2ndTo1stEnd)
    9050  {
    9051  // Find next non-null allocation or move nextAllocIndex to the end.
    9052  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9053  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9054  {
    9055  ++nextAlloc2ndIndex;
    9056  }
    9057 
    9058  // Found non-null allocation.
    9059  if(nextAlloc2ndIndex < suballoc2ndCount)
    9060  {
    9061  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9062 
    9063  // 1. Process free space before this allocation.
    9064  if(lastOffset < suballoc.offset)
    9065  {
    9066  // There is free space from lastOffset to suballoc.offset.
    9067  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9068  ++outInfo.unusedRangeCount;
    9069  outInfo.unusedBytes += unusedRangeSize;
    9070  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9071  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9072  }
    9073 
    9074  // 2. Process this allocation.
    9075  // There is allocation with suballoc.offset, suballoc.size.
    9076  outInfo.usedBytes += suballoc.size;
    9077  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
     9078  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9079 
    9080  // 3. Prepare for next iteration.
    9081  lastOffset = suballoc.offset + suballoc.size;
    9082  ++nextAlloc2ndIndex;
    9083  }
    9084  // We are at the end.
    9085  else
    9086  {
    9087  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9088  if(lastOffset < freeSpace2ndTo1stEnd)
    9089  {
    9090  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9091  ++outInfo.unusedRangeCount;
    9092  outInfo.unusedBytes += unusedRangeSize;
    9093  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9094  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9095  }
    9096 
    9097  // End of loop.
    9098  lastOffset = freeSpace2ndTo1stEnd;
    9099  }
    9100  }
    9101  }
    9102 
    9103  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9104  const VkDeviceSize freeSpace1stTo2ndEnd =
    9105  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9106  while(lastOffset < freeSpace1stTo2ndEnd)
    9107  {
     9108  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9109  while(nextAlloc1stIndex < suballoc1stCount &&
    9110  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9111  {
    9112  ++nextAlloc1stIndex;
    9113  }
    9114 
    9115  // Found non-null allocation.
    9116  if(nextAlloc1stIndex < suballoc1stCount)
    9117  {
    9118  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9119 
    9120  // 1. Process free space before this allocation.
    9121  if(lastOffset < suballoc.offset)
    9122  {
    9123  // There is free space from lastOffset to suballoc.offset.
    9124  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9125  ++outInfo.unusedRangeCount;
    9126  outInfo.unusedBytes += unusedRangeSize;
    9127  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9128  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9129  }
    9130 
    9131  // 2. Process this allocation.
    9132  // There is allocation with suballoc.offset, suballoc.size.
    9133  outInfo.usedBytes += suballoc.size;
    9134  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
     9135  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9136 
    9137  // 3. Prepare for next iteration.
    9138  lastOffset = suballoc.offset + suballoc.size;
    9139  ++nextAlloc1stIndex;
    9140  }
    9141  // We are at the end.
    9142  else
    9143  {
    9144  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9145  if(lastOffset < freeSpace1stTo2ndEnd)
    9146  {
    9147  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9148  ++outInfo.unusedRangeCount;
    9149  outInfo.unusedBytes += unusedRangeSize;
    9150  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9151  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9152  }
    9153 
    9154  // End of loop.
    9155  lastOffset = freeSpace1stTo2ndEnd;
    9156  }
    9157  }
    9158 
    9159  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9160  {
    9161  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9162  while(lastOffset < size)
    9163  {
     9164  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9165  while(nextAlloc2ndIndex != SIZE_MAX &&
    9166  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9167  {
    9168  --nextAlloc2ndIndex;
    9169  }
    9170 
    9171  // Found non-null allocation.
    9172  if(nextAlloc2ndIndex != SIZE_MAX)
    9173  {
    9174  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9175 
    9176  // 1. Process free space before this allocation.
    9177  if(lastOffset < suballoc.offset)
    9178  {
    9179  // There is free space from lastOffset to suballoc.offset.
    9180  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9181  ++outInfo.unusedRangeCount;
    9182  outInfo.unusedBytes += unusedRangeSize;
    9183  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9184  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9185  }
    9186 
    9187  // 2. Process this allocation.
    9188  // There is allocation with suballoc.offset, suballoc.size.
    9189  outInfo.usedBytes += suballoc.size;
    9190  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
     9191  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
    9192 
    9193  // 3. Prepare for next iteration.
    9194  lastOffset = suballoc.offset + suballoc.size;
    9195  --nextAlloc2ndIndex;
    9196  }
    9197  // We are at the end.
    9198  else
    9199  {
    9200  // There is free space from lastOffset to size.
    9201  if(lastOffset < size)
    9202  {
    9203  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9204  ++outInfo.unusedRangeCount;
    9205  outInfo.unusedBytes += unusedRangeSize;
    9206  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
     9207  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    9208  }
    9209 
    9210  // End of loop.
    9211  lastOffset = size;
    9212  }
    9213  }
    9214  }
    9215 
    9216  outInfo.unusedBytes = size - outInfo.usedBytes;
    9217 }
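// [Editorial note, not part of vk_mem_alloc.h] CalcAllocationStatInfo() supplies
// the per-block numbers aggregated by the public statistics API. A minimal,
// hedged usage sketch; `allocator` is assumed to be an initialized VmaAllocator:
//
//     VmaStats stats;
//     vmaCalculateStats(allocator, &stats);
//     printf("used %llu B, unused %llu B, largest unused range %llu B\n",
//         (unsigned long long)stats.total.usedBytes,
//         (unsigned long long)stats.total.unusedBytes,
//         (unsigned long long)stats.total.unusedRangeSizeMax);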
    9218 
    9219 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
    9220 {
    9221  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9222  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9223  const VkDeviceSize size = GetSize();
    9224  const size_t suballoc1stCount = suballocations1st.size();
    9225  const size_t suballoc2ndCount = suballocations2nd.size();
    9226 
    9227  inoutStats.size += size;
    9228 
    9229  VkDeviceSize lastOffset = 0;
    9230 
    9231  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9232  {
    9233  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
     9234  size_t nextAlloc2ndIndex = 0;
    9235  while(lastOffset < freeSpace2ndTo1stEnd)
    9236  {
    9237  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9238  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9239  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9240  {
    9241  ++nextAlloc2ndIndex;
    9242  }
    9243 
    9244  // Found non-null allocation.
    9245  if(nextAlloc2ndIndex < suballoc2ndCount)
    9246  {
    9247  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9248 
    9249  // 1. Process free space before this allocation.
    9250  if(lastOffset < suballoc.offset)
    9251  {
    9252  // There is free space from lastOffset to suballoc.offset.
    9253  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9254  inoutStats.unusedSize += unusedRangeSize;
    9255  ++inoutStats.unusedRangeCount;
    9256  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9257  }
    9258 
    9259  // 2. Process this allocation.
    9260  // There is allocation with suballoc.offset, suballoc.size.
    9261  ++inoutStats.allocationCount;
    9262 
    9263  // 3. Prepare for next iteration.
    9264  lastOffset = suballoc.offset + suballoc.size;
    9265  ++nextAlloc2ndIndex;
    9266  }
    9267  // We are at the end.
    9268  else
    9269  {
    9270  if(lastOffset < freeSpace2ndTo1stEnd)
    9271  {
    9272  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9273  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9274  inoutStats.unusedSize += unusedRangeSize;
    9275  ++inoutStats.unusedRangeCount;
    9276  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9277  }
    9278 
    9279  // End of loop.
    9280  lastOffset = freeSpace2ndTo1stEnd;
    9281  }
    9282  }
    9283  }
    9284 
    9285  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9286  const VkDeviceSize freeSpace1stTo2ndEnd =
    9287  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9288  while(lastOffset < freeSpace1stTo2ndEnd)
    9289  {
     9290  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9291  while(nextAlloc1stIndex < suballoc1stCount &&
    9292  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9293  {
    9294  ++nextAlloc1stIndex;
    9295  }
    9296 
    9297  // Found non-null allocation.
    9298  if(nextAlloc1stIndex < suballoc1stCount)
    9299  {
    9300  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9301 
    9302  // 1. Process free space before this allocation.
    9303  if(lastOffset < suballoc.offset)
    9304  {
    9305  // There is free space from lastOffset to suballoc.offset.
    9306  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9307  inoutStats.unusedSize += unusedRangeSize;
    9308  ++inoutStats.unusedRangeCount;
    9309  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9310  }
    9311 
    9312  // 2. Process this allocation.
    9313  // There is allocation with suballoc.offset, suballoc.size.
    9314  ++inoutStats.allocationCount;
    9315 
    9316  // 3. Prepare for next iteration.
    9317  lastOffset = suballoc.offset + suballoc.size;
    9318  ++nextAlloc1stIndex;
    9319  }
    9320  // We are at the end.
    9321  else
    9322  {
    9323  if(lastOffset < freeSpace1stTo2ndEnd)
    9324  {
    9325  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9326  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9327  inoutStats.unusedSize += unusedRangeSize;
    9328  ++inoutStats.unusedRangeCount;
    9329  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9330  }
    9331 
    9332  // End of loop.
    9333  lastOffset = freeSpace1stTo2ndEnd;
    9334  }
    9335  }
    9336 
    9337  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9338  {
    9339  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9340  while(lastOffset < size)
    9341  {
    9342  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9343  while(nextAlloc2ndIndex != SIZE_MAX &&
    9344  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9345  {
    9346  --nextAlloc2ndIndex;
    9347  }
    9348 
    9349  // Found non-null allocation.
    9350  if(nextAlloc2ndIndex != SIZE_MAX)
    9351  {
    9352  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9353 
    9354  // 1. Process free space before this allocation.
    9355  if(lastOffset < suballoc.offset)
    9356  {
    9357  // There is free space from lastOffset to suballoc.offset.
    9358  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9359  inoutStats.unusedSize += unusedRangeSize;
    9360  ++inoutStats.unusedRangeCount;
    9361  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9362  }
    9363 
    9364  // 2. Process this allocation.
    9365  // There is allocation with suballoc.offset, suballoc.size.
    9366  ++inoutStats.allocationCount;
    9367 
    9368  // 3. Prepare for next iteration.
    9369  lastOffset = suballoc.offset + suballoc.size;
    9370  --nextAlloc2ndIndex;
    9371  }
    9372  // We are at the end.
    9373  else
    9374  {
    9375  if(lastOffset < size)
    9376  {
    9377  // There is free space from lastOffset to size.
    9378  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9379  inoutStats.unusedSize += unusedRangeSize;
    9380  ++inoutStats.unusedRangeCount;
    9381  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
    9382  }
    9383 
    9384  // End of loop.
    9385  lastOffset = size;
    9386  }
    9387  }
    9388  }
    9389 }
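// [Editorial note, not part of vk_mem_alloc.h] AddPoolStats() is the per-block
// worker behind vmaGetPoolStats(). Hedged sketch; `allocator` and `pool` are
// assumed to exist already:
//
//     VmaPoolStats poolStats = {};
//     vmaGetPoolStats(allocator, pool, &poolStats);
//     // poolStats.unusedRangeSizeMax approximates the largest allocation that
//     // could still fit into the pool's existing blocks.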
    9390 
    9391 #if VMA_STATS_STRING_ENABLED
    9392 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
    9393 {
    9394  const VkDeviceSize size = GetSize();
    9395  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9396  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9397  const size_t suballoc1stCount = suballocations1st.size();
    9398  const size_t suballoc2ndCount = suballocations2nd.size();
    9399 
    9400  // FIRST PASS
    9401 
    9402  size_t unusedRangeCount = 0;
    9403  VkDeviceSize usedBytes = 0;
    9404 
    9405  VkDeviceSize lastOffset = 0;
    9406 
    9407  size_t alloc2ndCount = 0;
    9408  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9409  {
    9410  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9411  size_t nextAlloc2ndIndex = 0;
    9412  while(lastOffset < freeSpace2ndTo1stEnd)
    9413  {
    9414  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9415  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9416  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9417  {
    9418  ++nextAlloc2ndIndex;
    9419  }
    9420 
    9421  // Found non-null allocation.
    9422  if(nextAlloc2ndIndex < suballoc2ndCount)
    9423  {
    9424  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9425 
    9426  // 1. Process free space before this allocation.
    9427  if(lastOffset < suballoc.offset)
    9428  {
    9429  // There is free space from lastOffset to suballoc.offset.
    9430  ++unusedRangeCount;
    9431  }
    9432 
    9433  // 2. Process this allocation.
    9434  // There is allocation with suballoc.offset, suballoc.size.
    9435  ++alloc2ndCount;
    9436  usedBytes += suballoc.size;
    9437 
    9438  // 3. Prepare for next iteration.
    9439  lastOffset = suballoc.offset + suballoc.size;
    9440  ++nextAlloc2ndIndex;
    9441  }
    9442  // We are at the end.
    9443  else
    9444  {
    9445  if(lastOffset < freeSpace2ndTo1stEnd)
    9446  {
    9447  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9448  ++unusedRangeCount;
    9449  }
    9450 
    9451  // End of loop.
    9452  lastOffset = freeSpace2ndTo1stEnd;
    9453  }
    9454  }
    9455  }
    9456 
    9457  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9458  size_t alloc1stCount = 0;
    9459  const VkDeviceSize freeSpace1stTo2ndEnd =
    9460  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
    9461  while(lastOffset < freeSpace1stTo2ndEnd)
    9462  {
     9463  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9464  while(nextAlloc1stIndex < suballoc1stCount &&
    9465  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9466  {
    9467  ++nextAlloc1stIndex;
    9468  }
    9469 
    9470  // Found non-null allocation.
    9471  if(nextAlloc1stIndex < suballoc1stCount)
    9472  {
    9473  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9474 
    9475  // 1. Process free space before this allocation.
    9476  if(lastOffset < suballoc.offset)
    9477  {
    9478  // There is free space from lastOffset to suballoc.offset.
    9479  ++unusedRangeCount;
    9480  }
    9481 
    9482  // 2. Process this allocation.
    9483  // There is allocation with suballoc.offset, suballoc.size.
    9484  ++alloc1stCount;
    9485  usedBytes += suballoc.size;
    9486 
    9487  // 3. Prepare for next iteration.
    9488  lastOffset = suballoc.offset + suballoc.size;
    9489  ++nextAlloc1stIndex;
    9490  }
    9491  // We are at the end.
    9492  else
    9493  {
     9494  if(lastOffset < freeSpace1stTo2ndEnd)
    9495  {
    9496  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9497  ++unusedRangeCount;
    9498  }
    9499 
    9500  // End of loop.
    9501  lastOffset = freeSpace1stTo2ndEnd;
    9502  }
    9503  }
    9504 
    9505  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9506  {
    9507  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9508  while(lastOffset < size)
    9509  {
    9510  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9511  while(nextAlloc2ndIndex != SIZE_MAX &&
    9512  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9513  {
    9514  --nextAlloc2ndIndex;
    9515  }
    9516 
    9517  // Found non-null allocation.
    9518  if(nextAlloc2ndIndex != SIZE_MAX)
    9519  {
    9520  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9521 
    9522  // 1. Process free space before this allocation.
    9523  if(lastOffset < suballoc.offset)
    9524  {
    9525  // There is free space from lastOffset to suballoc.offset.
    9526  ++unusedRangeCount;
    9527  }
    9528 
    9529  // 2. Process this allocation.
    9530  // There is allocation with suballoc.offset, suballoc.size.
    9531  ++alloc2ndCount;
    9532  usedBytes += suballoc.size;
    9533 
    9534  // 3. Prepare for next iteration.
    9535  lastOffset = suballoc.offset + suballoc.size;
    9536  --nextAlloc2ndIndex;
    9537  }
    9538  // We are at the end.
    9539  else
    9540  {
    9541  if(lastOffset < size)
    9542  {
    9543  // There is free space from lastOffset to size.
    9544  ++unusedRangeCount;
    9545  }
    9546 
    9547  // End of loop.
    9548  lastOffset = size;
    9549  }
    9550  }
    9551  }
    9552 
    9553  const VkDeviceSize unusedBytes = size - usedBytes;
    9554  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
    9555 
    9556  // SECOND PASS
    9557  lastOffset = 0;
    9558 
    9559  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9560  {
    9561  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
    9562  size_t nextAlloc2ndIndex = 0;
    9563  while(lastOffset < freeSpace2ndTo1stEnd)
    9564  {
    9565  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9566  while(nextAlloc2ndIndex < suballoc2ndCount &&
    9567  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9568  {
    9569  ++nextAlloc2ndIndex;
    9570  }
    9571 
    9572  // Found non-null allocation.
    9573  if(nextAlloc2ndIndex < suballoc2ndCount)
    9574  {
    9575  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9576 
    9577  // 1. Process free space before this allocation.
    9578  if(lastOffset < suballoc.offset)
    9579  {
    9580  // There is free space from lastOffset to suballoc.offset.
    9581  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9582  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9583  }
    9584 
    9585  // 2. Process this allocation.
    9586  // There is allocation with suballoc.offset, suballoc.size.
    9587  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9588 
    9589  // 3. Prepare for next iteration.
    9590  lastOffset = suballoc.offset + suballoc.size;
    9591  ++nextAlloc2ndIndex;
    9592  }
    9593  // We are at the end.
    9594  else
    9595  {
    9596  if(lastOffset < freeSpace2ndTo1stEnd)
    9597  {
    9598  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
    9599  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
    9600  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9601  }
    9602 
    9603  // End of loop.
    9604  lastOffset = freeSpace2ndTo1stEnd;
    9605  }
    9606  }
    9607  }
    9608 
    9609  nextAlloc1stIndex = m_1stNullItemsBeginCount;
    9610  while(lastOffset < freeSpace1stTo2ndEnd)
    9611  {
     9612  // Find next non-null allocation or move nextAlloc1stIndex to the end.
    9613  while(nextAlloc1stIndex < suballoc1stCount &&
    9614  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
    9615  {
    9616  ++nextAlloc1stIndex;
    9617  }
    9618 
    9619  // Found non-null allocation.
    9620  if(nextAlloc1stIndex < suballoc1stCount)
    9621  {
    9622  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
    9623 
    9624  // 1. Process free space before this allocation.
    9625  if(lastOffset < suballoc.offset)
    9626  {
    9627  // There is free space from lastOffset to suballoc.offset.
    9628  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9629  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9630  }
    9631 
    9632  // 2. Process this allocation.
    9633  // There is allocation with suballoc.offset, suballoc.size.
    9634  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9635 
    9636  // 3. Prepare for next iteration.
    9637  lastOffset = suballoc.offset + suballoc.size;
    9638  ++nextAlloc1stIndex;
    9639  }
    9640  // We are at the end.
    9641  else
    9642  {
    9643  if(lastOffset < freeSpace1stTo2ndEnd)
    9644  {
    9645  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
    9646  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
    9647  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9648  }
    9649 
    9650  // End of loop.
    9651  lastOffset = freeSpace1stTo2ndEnd;
    9652  }
    9653  }
    9654 
    9655  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9656  {
    9657  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
    9658  while(lastOffset < size)
    9659  {
    9660  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
    9661  while(nextAlloc2ndIndex != SIZE_MAX &&
    9662  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
    9663  {
    9664  --nextAlloc2ndIndex;
    9665  }
    9666 
    9667  // Found non-null allocation.
    9668  if(nextAlloc2ndIndex != SIZE_MAX)
    9669  {
    9670  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
    9671 
    9672  // 1. Process free space before this allocation.
    9673  if(lastOffset < suballoc.offset)
    9674  {
    9675  // There is free space from lastOffset to suballoc.offset.
    9676  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
    9677  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9678  }
    9679 
    9680  // 2. Process this allocation.
    9681  // There is allocation with suballoc.offset, suballoc.size.
    9682  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
    9683 
    9684  // 3. Prepare for next iteration.
    9685  lastOffset = suballoc.offset + suballoc.size;
    9686  --nextAlloc2ndIndex;
    9687  }
    9688  // We are at the end.
    9689  else
    9690  {
    9691  if(lastOffset < size)
    9692  {
    9693  // There is free space from lastOffset to size.
    9694  const VkDeviceSize unusedRangeSize = size - lastOffset;
    9695  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
    9696  }
    9697 
    9698  // End of loop.
    9699  lastOffset = size;
    9700  }
    9701  }
    9702  }
    9703 
    9704  PrintDetailedMap_End(json);
    9705 }
    9706 #endif // #if VMA_STATS_STRING_ENABLED
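// [Editorial note, not part of vk_mem_alloc.h] PrintDetailedMap() above is
// reached via vmaBuildStatsString() with detailedMap == VK_TRUE. Sketch,
// assuming an initialized `allocator`:
//
//     char* statsString = VMA_NULL;
//     vmaBuildStatsString(allocator, &statsString, VK_TRUE);
//     // statsString holds JSON with one entry per suballocation, produced by
//     // the two passes above (first pass counts, second pass prints).
//     vmaFreeStatsString(allocator, statsString);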
    9707 
    9708 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
    9709  uint32_t currentFrameIndex,
    9710  uint32_t frameInUseCount,
    9711  VkDeviceSize bufferImageGranularity,
    9712  VkDeviceSize allocSize,
    9713  VkDeviceSize allocAlignment,
    9714  bool upperAddress,
    9715  VmaSuballocationType allocType,
    9716  bool canMakeOtherLost,
    9717  uint32_t strategy,
    9718  VmaAllocationRequest* pAllocationRequest)
    9719 {
    9720  VMA_ASSERT(allocSize > 0);
    9721  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
    9722  VMA_ASSERT(pAllocationRequest != VMA_NULL);
    9723  VMA_HEAVY_ASSERT(Validate());
    9724  return upperAddress ?
    9725  CreateAllocationRequest_UpperAddress(
    9726  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9727  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
    9728  CreateAllocationRequest_LowerAddress(
    9729  currentFrameIndex, frameInUseCount, bufferImageGranularity,
    9730  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
    9731 }
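// [Editorial note, not part of vk_mem_alloc.h] upperAddress comes from
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, which is valid only in a custom pool
// created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; see the sketch after
// CreateAllocationRequest_UpperAddress() below.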
    9732 
    9733 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
    9734  uint32_t currentFrameIndex,
    9735  uint32_t frameInUseCount,
    9736  VkDeviceSize bufferImageGranularity,
    9737  VkDeviceSize allocSize,
    9738  VkDeviceSize allocAlignment,
    9739  VmaSuballocationType allocType,
    9740  bool canMakeOtherLost,
    9741  uint32_t strategy,
    9742  VmaAllocationRequest* pAllocationRequest)
    9743 {
    9744  const VkDeviceSize size = GetSize();
    9745  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9746  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9747 
    9748  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9749  {
    9750  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
    9751  return false;
    9752  }
    9753 
    9754  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
    9755  if(allocSize > size)
    9756  {
    9757  return false;
    9758  }
    9759  VkDeviceSize resultBaseOffset = size - allocSize;
    9760  if(!suballocations2nd.empty())
    9761  {
    9762  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9763  resultBaseOffset = lastSuballoc.offset - allocSize;
    9764  if(allocSize > lastSuballoc.offset)
    9765  {
    9766  return false;
    9767  }
    9768  }
    9769 
    9770  // Start from offset equal to end of free space.
    9771  VkDeviceSize resultOffset = resultBaseOffset;
    9772 
    9773  // Apply VMA_DEBUG_MARGIN at the end.
    9774  if(VMA_DEBUG_MARGIN > 0)
    9775  {
    9776  if(resultOffset < VMA_DEBUG_MARGIN)
    9777  {
    9778  return false;
    9779  }
    9780  resultOffset -= VMA_DEBUG_MARGIN;
    9781  }
    9782 
    9783  // Apply alignment.
    9784  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
    9785 
    9786  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
    9787  // Make bigger alignment if necessary.
    9788  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9789  {
    9790  bool bufferImageGranularityConflict = false;
    9791  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9792  {
    9793  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9794  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9795  {
    9796  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
    9797  {
    9798  bufferImageGranularityConflict = true;
    9799  break;
    9800  }
    9801  }
    9802  else
    9803  // Already on previous page.
    9804  break;
    9805  }
    9806  if(bufferImageGranularityConflict)
    9807  {
    9808  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
    9809  }
    9810  }
    9811 
    9812  // There is enough free space.
    9813  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
    9814  suballocations1st.back().offset + suballocations1st.back().size :
    9815  0;
    9816  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
    9817  {
    9818  // Check previous suballocations for BufferImageGranularity conflicts.
    9819  // If conflict exists, allocation cannot be made here.
    9820  if(bufferImageGranularity > 1)
    9821  {
    9822  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9823  {
    9824  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9825  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9826  {
    9827  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
    9828  {
    9829  return false;
    9830  }
    9831  }
    9832  else
    9833  {
    9834  // Already on next page.
    9835  break;
    9836  }
    9837  }
    9838  }
    9839 
    9840  // All tests passed: Success.
    9841  pAllocationRequest->offset = resultOffset;
    9842  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
    9843  pAllocationRequest->sumItemSize = 0;
    9844  // pAllocationRequest->item unused.
    9845  pAllocationRequest->itemsToMakeLostCount = 0;
    9846  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
    9847  return true;
    9848  }
    9849 
    9850  return false;
    9851 }
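// [Editorial note, not part of vk_mem_alloc.h] A hedged sketch of driving the
// upper-address path above; `allocator` and `memTypeIndex` are assumed:
//
//     VmaPoolCreateInfo poolInfo = {};
//     poolInfo.memoryTypeIndex = memTypeIndex;
//     poolInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//     poolInfo.blockSize = 64ull * 1024 * 1024;
//     poolInfo.maxBlockCount = 1; // linear algorithm uses a single block
//     VmaPool pool;
//     vmaCreatePool(allocator, &poolInfo, &pool);
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.pool = pool;
//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // -> upper address
//     // Then allocate e.g. with vmaAllocateMemory()/vmaCreateBuffer() as usual.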
    9852 
    9853 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
    9854  uint32_t currentFrameIndex,
    9855  uint32_t frameInUseCount,
    9856  VkDeviceSize bufferImageGranularity,
    9857  VkDeviceSize allocSize,
    9858  VkDeviceSize allocAlignment,
    9859  VmaSuballocationType allocType,
    9860  bool canMakeOtherLost,
    9861  uint32_t strategy,
    9862  VmaAllocationRequest* pAllocationRequest)
    9863 {
    9864  const VkDeviceSize size = GetSize();
    9865  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    9866  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    9867 
    9868  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9869  {
    9870  // Try to allocate at the end of 1st vector.
    9871 
    9872  VkDeviceSize resultBaseOffset = 0;
    9873  if(!suballocations1st.empty())
    9874  {
    9875  const VmaSuballocation& lastSuballoc = suballocations1st.back();
    9876  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9877  }
    9878 
    9879  // Start from offset equal to beginning of free space.
    9880  VkDeviceSize resultOffset = resultBaseOffset;
    9881 
    9882  // Apply VMA_DEBUG_MARGIN at the beginning.
    9883  if(VMA_DEBUG_MARGIN > 0)
    9884  {
    9885  resultOffset += VMA_DEBUG_MARGIN;
    9886  }
    9887 
    9888  // Apply alignment.
    9889  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9890 
    9891  // Check previous suballocations for BufferImageGranularity conflicts.
    9892  // Make bigger alignment if necessary.
    9893  if(bufferImageGranularity > 1 && !suballocations1st.empty())
    9894  {
    9895  bool bufferImageGranularityConflict = false;
    9896  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
    9897  {
    9898  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
    9899  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9900  {
    9901  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9902  {
    9903  bufferImageGranularityConflict = true;
    9904  break;
    9905  }
    9906  }
    9907  else
    9908  // Already on previous page.
    9909  break;
    9910  }
    9911  if(bufferImageGranularityConflict)
    9912  {
    9913  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    9914  }
    9915  }
    9916 
    9917  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
    9918  suballocations2nd.back().offset : size;
    9919 
    9920  // There is enough free space at the end after alignment.
    9921  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
    9922  {
    9923  // Check next suballocations for BufferImageGranularity conflicts.
    9924  // If conflict exists, allocation cannot be made here.
    9925  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    9926  {
    9927  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
    9928  {
    9929  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
    9930  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    9931  {
    9932  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    9933  {
    9934  return false;
    9935  }
    9936  }
    9937  else
    9938  {
    9939  // Already on previous page.
    9940  break;
    9941  }
    9942  }
    9943  }
    9944 
    9945  // All tests passed: Success.
    9946  pAllocationRequest->offset = resultOffset;
    9947  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
    9948  pAllocationRequest->sumItemSize = 0;
    9949  // pAllocationRequest->item, customData unused.
    9950  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
    9951  pAllocationRequest->itemsToMakeLostCount = 0;
    9952  return true;
    9953  }
    9954  }
    9955 
     9956  // Wrap around to the end of the 2nd vector and try to allocate there, treating
     9957  // the beginning of the 1st vector as the end of free space.
    9958  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    9959  {
    9960  VMA_ASSERT(!suballocations1st.empty());
    9961 
    9962  VkDeviceSize resultBaseOffset = 0;
    9963  if(!suballocations2nd.empty())
    9964  {
    9965  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
    9966  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
    9967  }
    9968 
    9969  // Start from offset equal to beginning of free space.
    9970  VkDeviceSize resultOffset = resultBaseOffset;
    9971 
    9972  // Apply VMA_DEBUG_MARGIN at the beginning.
    9973  if(VMA_DEBUG_MARGIN > 0)
    9974  {
    9975  resultOffset += VMA_DEBUG_MARGIN;
    9976  }
    9977 
    9978  // Apply alignment.
    9979  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
    9980 
    9981  // Check previous suballocations for BufferImageGranularity conflicts.
    9982  // Make bigger alignment if necessary.
    9983  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
    9984  {
    9985  bool bufferImageGranularityConflict = false;
    9986  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
    9987  {
    9988  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
    9989  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
    9990  {
    9991  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
    9992  {
    9993  bufferImageGranularityConflict = true;
    9994  break;
    9995  }
    9996  }
    9997  else
    9998  // Already on previous page.
    9999  break;
    10000  }
    10001  if(bufferImageGranularityConflict)
    10002  {
    10003  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
    10004  }
    10005  }
    10006 
    10007  pAllocationRequest->itemsToMakeLostCount = 0;
    10008  pAllocationRequest->sumItemSize = 0;
    10009  size_t index1st = m_1stNullItemsBeginCount;
    10010 
    10011  if(canMakeOtherLost)
    10012  {
    10013  while(index1st < suballocations1st.size() &&
    10014  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
    10015  {
    10016  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
    10017  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10018  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
    10019  {
    10020  // No problem.
    10021  }
    10022  else
    10023  {
    10024  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10025  if(suballoc.hAllocation->CanBecomeLost() &&
    10026  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10027  {
    10028  ++pAllocationRequest->itemsToMakeLostCount;
    10029  pAllocationRequest->sumItemSize += suballoc.size;
    10030  }
    10031  else
    10032  {
    10033  return false;
    10034  }
    10035  }
    10036  ++index1st;
    10037  }
    10038 
    10039  // Check next suballocations for BufferImageGranularity conflicts.
    10040  // If conflict exists, we must mark more allocations lost or fail.
    10041  if(bufferImageGranularity > 1)
    10042  {
    10043  while(index1st < suballocations1st.size())
    10044  {
    10045  const VmaSuballocation& suballoc = suballocations1st[index1st];
    10046  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
    10047  {
    10048  if(suballoc.hAllocation != VK_NULL_HANDLE)
    10049  {
    10050  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
    10051  if(suballoc.hAllocation->CanBecomeLost() &&
    10052  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
    10053  {
    10054  ++pAllocationRequest->itemsToMakeLostCount;
    10055  pAllocationRequest->sumItemSize += suballoc.size;
    10056  }
    10057  else
    10058  {
    10059  return false;
    10060  }
    10061  }
    10062  }
    10063  else
    10064  {
    10065  // Already on next page.
    10066  break;
    10067  }
    10068  ++index1st;
    10069  }
    10070  }
    10071 
    10072  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
    10073  if(index1st == suballocations1st.size() &&
    10074  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
    10075  {
     10076  // TODO: Not implemented yet; in this special case the allocation simply fails.
    10077  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
    10078  }
    10079  }
    10080 
    10081  // There is enough free space at the end after alignment.
    10082  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
    10083  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
    10084  {
    10085  // Check next suballocations for BufferImageGranularity conflicts.
    10086  // If conflict exists, allocation cannot be made here.
    10087  if(bufferImageGranularity > 1)
    10088  {
    10089  for(size_t nextSuballocIndex = index1st;
    10090  nextSuballocIndex < suballocations1st.size();
    10091  nextSuballocIndex++)
    10092  {
    10093  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
    10094  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
    10095  {
    10096  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
    10097  {
    10098  return false;
    10099  }
    10100  }
    10101  else
    10102  {
    10103  // Already on next page.
    10104  break;
    10105  }
    10106  }
    10107  }
    10108 
    10109  // All tests passed: Success.
    10110  pAllocationRequest->offset = resultOffset;
    10111  pAllocationRequest->sumFreeSize =
    10112  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
    10113  - resultBaseOffset
    10114  - pAllocationRequest->sumItemSize;
    10115  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
    10116  // pAllocationRequest->item, customData unused.
    10117  return true;
    10118  }
    10119  }
    10120 
    10121  return false;
    10122 }
    10123 
    10124 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
    10125  uint32_t currentFrameIndex,
    10126  uint32_t frameInUseCount,
    10127  VmaAllocationRequest* pAllocationRequest)
    10128 {
    10129  if(pAllocationRequest->itemsToMakeLostCount == 0)
    10130  {
    10131  return true;
    10132  }
    10133 
    10134  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
    10135 
    10136  // We always start from 1st.
    10137  SuballocationVectorType* suballocations = &AccessSuballocations1st();
    10138  size_t index = m_1stNullItemsBeginCount;
    10139  size_t madeLostCount = 0;
    10140  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
    10141  {
    10142  if(index == suballocations->size())
    10143  {
    10144  index = 0;
     10145  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
    10146  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10147  {
    10148  suballocations = &AccessSuballocations2nd();
    10149  }
    10150  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
    10151  // suballocations continues pointing at AccessSuballocations1st().
    10152  VMA_ASSERT(!suballocations->empty());
    10153  }
    10154  VmaSuballocation& suballoc = (*suballocations)[index];
    10155  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10156  {
    10157  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
    10158  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
    10159  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10160  {
    10161  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10162  suballoc.hAllocation = VK_NULL_HANDLE;
    10163  m_SumFreeSize += suballoc.size;
    10164  if(suballocations == &AccessSuballocations1st())
    10165  {
    10166  ++m_1stNullItemsMiddleCount;
    10167  }
    10168  else
    10169  {
    10170  ++m_2ndNullItemsCount;
    10171  }
    10172  ++madeLostCount;
    10173  }
    10174  else
    10175  {
    10176  return false;
    10177  }
    10178  }
    10179  ++index;
    10180  }
    10181 
    10182  CleanupAfterFree();
     10183  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
    10184 
    10185  return true;
    10186 }
    10187 
    10188 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10189 {
    10190  uint32_t lostAllocationCount = 0;
    10191 
    10192  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10193  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10194  {
    10195  VmaSuballocation& suballoc = suballocations1st[i];
    10196  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10197  suballoc.hAllocation->CanBecomeLost() &&
    10198  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10199  {
    10200  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10201  suballoc.hAllocation = VK_NULL_HANDLE;
    10202  ++m_1stNullItemsMiddleCount;
    10203  m_SumFreeSize += suballoc.size;
    10204  ++lostAllocationCount;
    10205  }
    10206  }
    10207 
    10208  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10209  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10210  {
    10211  VmaSuballocation& suballoc = suballocations2nd[i];
    10212  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
    10213  suballoc.hAllocation->CanBecomeLost() &&
    10214  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
    10215  {
    10216  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10217  suballoc.hAllocation = VK_NULL_HANDLE;
    10218  ++m_2ndNullItemsCount;
    10219  m_SumFreeSize += suballoc.size;
    10220  ++lostAllocationCount;
    10221  }
    10222  }
    10223 
    10224  if(lostAllocationCount)
    10225  {
    10226  CleanupAfterFree();
    10227  }
    10228 
    10229  return lostAllocationCount;
    10230 }
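// [Editorial note, not part of vk_mem_alloc.h] The "lost allocation" machinery
// above is driven by frame indices. A hedged outline; `allocator`, `alloc` and
// `frameIndex` are assumed:
//
//     vmaSetCurrentFrameIndex(allocator, frameIndex); // once per frame
//     // Allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and
//     // not used for more than frameInUseCount frames may be made lost here.
//     if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
//     {
//         // Allocation became lost: destroy and recreate the resource.
//     }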
    10231 
    10232 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
    10233 {
    10234  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10235  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
    10236  {
    10237  const VmaSuballocation& suballoc = suballocations1st[i];
    10238  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10239  {
    10240  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10241  {
    10242  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10243  return VK_ERROR_VALIDATION_FAILED_EXT;
    10244  }
    10245  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10246  {
    10247  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10248  return VK_ERROR_VALIDATION_FAILED_EXT;
    10249  }
    10250  }
    10251  }
    10252 
    10253  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10254  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
    10255  {
    10256  const VmaSuballocation& suballoc = suballocations2nd[i];
    10257  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
    10258  {
    10259  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
    10260  {
    10261  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
    10262  return VK_ERROR_VALIDATION_FAILED_EXT;
    10263  }
    10264  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
    10265  {
    10266  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
    10267  return VK_ERROR_VALIDATION_FAILED_EXT;
    10268  }
    10269  }
    10270  }
    10271 
    10272  return VK_SUCCESS;
    10273 }
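// [Editorial note, not part of vk_mem_alloc.h] CheckCorruption() validates the
// magic values written into the VMA_DEBUG_MARGIN around each allocation, so it
// only works when corruption detection was compiled in. Hedged sketch:
//
//     // Before including this header:
//     // #define VMA_DEBUG_MARGIN 16
//     // #define VMA_DEBUG_DETECT_CORRUPTION 1
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
//     // VK_ERROR_VALIDATION_FAILED_EXT means an allocation wrote past its bounds.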
    10274 
    10275 void VmaBlockMetadata_Linear::Alloc(
    10276  const VmaAllocationRequest& request,
    10277  VmaSuballocationType type,
    10278  VkDeviceSize allocSize,
    10279  VmaAllocation hAllocation)
    10280 {
    10281  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
    10282 
    10283  switch(request.type)
    10284  {
    10285  case VmaAllocationRequestType::UpperAddress:
    10286  {
    10287  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
    10288  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
    10289  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10290  suballocations2nd.push_back(newSuballoc);
    10291  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
    10292  }
    10293  break;
    10294  case VmaAllocationRequestType::EndOf1st:
    10295  {
    10296  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10297 
    10298  VMA_ASSERT(suballocations1st.empty() ||
    10299  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
    10300  // Check if it fits before the end of the block.
    10301  VMA_ASSERT(request.offset + allocSize <= GetSize());
    10302 
    10303  suballocations1st.push_back(newSuballoc);
    10304  }
    10305  break;
    10306  case VmaAllocationRequestType::EndOf2nd:
    10307  {
    10308  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10309  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
    10310  VMA_ASSERT(!suballocations1st.empty() &&
    10311  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
    10312  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10313 
    10314  switch(m_2ndVectorMode)
    10315  {
    10316  case SECOND_VECTOR_EMPTY:
    10317  // First allocation from second part ring buffer.
    10318  VMA_ASSERT(suballocations2nd.empty());
    10319  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
    10320  break;
    10321  case SECOND_VECTOR_RING_BUFFER:
    10322  // 2-part ring buffer is already started.
    10323  VMA_ASSERT(!suballocations2nd.empty());
    10324  break;
    10325  case SECOND_VECTOR_DOUBLE_STACK:
    10326  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
    10327  break;
    10328  default:
    10329  VMA_ASSERT(0);
    10330  }
    10331 
    10332  suballocations2nd.push_back(newSuballoc);
    10333  }
    10334  break;
    10335  default:
    10336  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
    10337  }
    10338 
    10339  m_SumFreeSize -= newSuballoc.size;
    10340 }
    10341 
    10342 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
    10343 {
    10344  FreeAtOffset(allocation->GetOffset());
    10345 }
    10346 
    10347 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
    10348 {
    10349  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10350  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10351 
    10352  if(!suballocations1st.empty())
    10353  {
    10354  // First allocation: Mark it as next empty at the beginning.
    10355  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
    10356  if(firstSuballoc.offset == offset)
    10357  {
    10358  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
    10359  firstSuballoc.hAllocation = VK_NULL_HANDLE;
    10360  m_SumFreeSize += firstSuballoc.size;
    10361  ++m_1stNullItemsBeginCount;
    10362  CleanupAfterFree();
    10363  return;
    10364  }
    10365  }
    10366 
    10367  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
    10368  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
    10369  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
    10370  {
    10371  VmaSuballocation& lastSuballoc = suballocations2nd.back();
    10372  if(lastSuballoc.offset == offset)
    10373  {
    10374  m_SumFreeSize += lastSuballoc.size;
    10375  suballocations2nd.pop_back();
    10376  CleanupAfterFree();
    10377  return;
    10378  }
    10379  }
    10380  // Last allocation in 1st vector.
    10381  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
    10382  {
    10383  VmaSuballocation& lastSuballoc = suballocations1st.back();
    10384  if(lastSuballoc.offset == offset)
    10385  {
    10386  m_SumFreeSize += lastSuballoc.size;
    10387  suballocations1st.pop_back();
    10388  CleanupAfterFree();
    10389  return;
    10390  }
    10391  }
    10392 
    10393  // Item from the middle of 1st vector.
    10394  {
    10395  VmaSuballocation refSuballoc;
    10396  refSuballoc.offset = offset;
    10397  // Rest of members stays uninitialized intentionally for better performance.
    10398  SuballocationVectorType::iterator it = VmaVectorFindSorted<VmaSuballocationOffsetLess>(
    10399  suballocations1st.begin() + m_1stNullItemsBeginCount,
    10400  suballocations1st.end(),
    10401  refSuballoc);
    10402  if(it != suballocations1st.end())
    10403  {
    10404  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10405  it->hAllocation = VK_NULL_HANDLE;
    10406  ++m_1stNullItemsMiddleCount;
    10407  m_SumFreeSize += it->size;
    10408  CleanupAfterFree();
    10409  return;
    10410  }
    10411  }
    10412 
    10413  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
    10414  {
    10415  // Item from the middle of 2nd vector.
    10416  VmaSuballocation refSuballoc;
    10417  refSuballoc.offset = offset;
    10418  // Rest of members stays uninitialized intentionally for better performance.
    10419  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
    10420  VmaVectorFindSorted<VmaSuballocationOffsetLess>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc) :
    10421  VmaVectorFindSorted<VmaSuballocationOffsetGreater>(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc);
    10422  if(it != suballocations2nd.end())
    10423  {
    10424  it->type = VMA_SUBALLOCATION_TYPE_FREE;
    10425  it->hAllocation = VK_NULL_HANDLE;
    10426  ++m_2ndNullItemsCount;
    10427  m_SumFreeSize += it->size;
    10428  CleanupAfterFree();
    10429  return;
    10430  }
    10431  }
    10432 
    10433  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
    10434 }
    10435 
    10436 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
    10437 {
    10438  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10439  const size_t suballocCount = AccessSuballocations1st().size();
    10440  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
    10441 }
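// [Editorial note, not part of vk_mem_alloc.h] The condition above compacts only
// when the 1st vector holds more than 32 items and null (freed) items outnumber
// live ones at least 3:2. Worked example: 40 items, 25 of them null:
// 25 * 2 = 50 >= (40 - 25) * 3 = 45, so compaction runs.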
    10442 
    10443 void VmaBlockMetadata_Linear::CleanupAfterFree()
    10444 {
    10445  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
    10446  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
    10447 
    10448  if(IsEmpty())
    10449  {
    10450  suballocations1st.clear();
    10451  suballocations2nd.clear();
    10452  m_1stNullItemsBeginCount = 0;
    10453  m_1stNullItemsMiddleCount = 0;
    10454  m_2ndNullItemsCount = 0;
    10455  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10456  }
    10457  else
    10458  {
    10459  const size_t suballoc1stCount = suballocations1st.size();
    10460  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
    10461  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
    10462 
    10463  // Find more null items at the beginning of 1st vector.
    10464  while(m_1stNullItemsBeginCount < suballoc1stCount &&
    10465  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10466  {
    10467  ++m_1stNullItemsBeginCount;
    10468  --m_1stNullItemsMiddleCount;
    10469  }
    10470 
    10471  // Find more null items at the end of 1st vector.
    10472  while(m_1stNullItemsMiddleCount > 0 &&
    10473  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
    10474  {
    10475  --m_1stNullItemsMiddleCount;
    10476  suballocations1st.pop_back();
    10477  }
    10478 
    10479  // Find more null items at the end of 2nd vector.
    10480  while(m_2ndNullItemsCount > 0 &&
    10481  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
    10482  {
    10483  --m_2ndNullItemsCount;
    10484  suballocations2nd.pop_back();
    10485  }
    10486 
    10487  // Find more null items at the beginning of 2nd vector.
    10488  while(m_2ndNullItemsCount > 0 &&
    10489  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
    10490  {
    10491  --m_2ndNullItemsCount;
    10492  suballocations2nd.remove(0);
    10493  }
    10494 
    10495  if(ShouldCompact1st())
    10496  {
    10497  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
    10498  size_t srcIndex = m_1stNullItemsBeginCount;
    10499  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
    10500  {
    10501  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
    10502  {
    10503  ++srcIndex;
    10504  }
    10505  if(dstIndex != srcIndex)
    10506  {
    10507  suballocations1st[dstIndex] = suballocations1st[srcIndex];
    10508  }
    10509  ++srcIndex;
    10510  }
    10511  suballocations1st.resize(nonNullItemCount);
    10512  m_1stNullItemsBeginCount = 0;
    10513  m_1stNullItemsMiddleCount = 0;
    10514  }
    10515 
    10516  // 2nd vector became empty.
    10517  if(suballocations2nd.empty())
    10518  {
    10519  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10520  }
    10521 
    10522  // 1st vector became empty.
    10523  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
    10524  {
    10525  suballocations1st.clear();
    10526  m_1stNullItemsBeginCount = 0;
    10527 
    10528  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
    10529  {
    10530  // Swap 1st with 2nd. Now 2nd is empty.
    10531  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
    10532  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
    10533  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
    10534  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
    10535  {
    10536  ++m_1stNullItemsBeginCount;
    10537  --m_1stNullItemsMiddleCount;
    10538  }
    10539  m_2ndNullItemsCount = 0;
    10540  m_1stVectorIndex ^= 1;
    10541  }
    10542  }
    10543  }
    10544 
    10545  VMA_HEAVY_ASSERT(Validate());
    10546 }
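/*
A hedged aside on the swap above: when the 1st vector drains completely while
the 2nd acts as a ring buffer, the two vectors trade roles in O(1) by
flipping m_1stVectorIndex - no suballocation is copied. A standalone sketch
of the same trick, using hypothetical names:

    #include <vector>
    std::vector<int> vectors[2];
    uint32_t firstVectorIndex = 0;  // vectors[firstVectorIndex] plays "1st"
    firstVectorIndex ^= 1;          // roles swapped, no element is moved
*/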
    10547 
    10548 
    10549 ////////////////////////////////////////////////////////////////////////////////
    10550 // class VmaBlockMetadata_Buddy
    10551 
    10552 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
    10553  VmaBlockMetadata(hAllocator),
    10554  m_Root(VMA_NULL),
    10555  m_AllocationCount(0),
    10556  m_FreeCount(1),
    10557  m_SumFreeSize(0)
    10558 {
    10559  memset(m_FreeList, 0, sizeof(m_FreeList));
    10560 }
    10561 
    10562 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
    10563 {
    10564  DeleteNode(m_Root);
    10565 }
    10566 
    10567 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
    10568 {
    10569  VmaBlockMetadata::Init(size);
    10570 
    10571  m_UsableSize = VmaPrevPow2(size);
    10572  m_SumFreeSize = m_UsableSize;
    10573 
    10574  // Calculate m_LevelCount.
    10575  m_LevelCount = 1;
    10576  while(m_LevelCount < MAX_LEVELS &&
    10577  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    10578  {
    10579  ++m_LevelCount;
    10580  }
    10581 
    10582  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
    10583  rootNode->offset = 0;
    10584  rootNode->type = Node::TYPE_FREE;
    10585  rootNode->parent = VMA_NULL;
    10586  rootNode->buddy = VMA_NULL;
    10587 
    10588  m_Root = rootNode;
    10589  AddToFreeListFront(0, rootNode);
    10590 }
    10591 
    10592 bool VmaBlockMetadata_Buddy::Validate() const
    10593 {
    10594  // Validate tree.
    10595  ValidationContext ctx;
    10596  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
    10597  {
    10598  VMA_VALIDATE(false && "ValidateNode failed.");
    10599  }
    10600  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
    10601  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
    10602 
    10603  // Validate free node lists.
    10604  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10605  {
    10606  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
    10607  m_FreeList[level].front->free.prev == VMA_NULL);
    10608 
    10609  for(Node* node = m_FreeList[level].front;
    10610  node != VMA_NULL;
    10611  node = node->free.next)
    10612  {
    10613  VMA_VALIDATE(node->type == Node::TYPE_FREE);
    10614 
    10615  if(node->free.next == VMA_NULL)
    10616  {
    10617  VMA_VALIDATE(m_FreeList[level].back == node);
    10618  }
    10619  else
    10620  {
    10621  VMA_VALIDATE(node->free.next->free.prev == node);
    10622  }
    10623  }
    10624  }
    10625 
    10626  // Validate that free lists at higher levels are empty.
    10627  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
    10628  {
    10629  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
    10630  }
    10631 
    10632  return true;
    10633 }
    10634 
    10635 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
    10636 {
    10637  for(uint32_t level = 0; level < m_LevelCount; ++level)
    10638  {
    10639  if(m_FreeList[level].front != VMA_NULL)
    10640  {
    10641  return LevelToNodeSize(level);
    10642  }
    10643  }
    10644  return 0;
    10645 }
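/*
Why scanning from level 0 works, as a short sketch (assumption: the usable
size is a power of two, which VmaPrevPow2() in Init() guarantees): every node
at level L has size m_UsableSize >> L, so the first level whose free list is
non-empty necessarily holds the largest free node.

    VkDeviceSize LevelToNodeSizeSketch(VkDeviceSize usableSize, uint32_t level)
    {
        return usableSize >> level;  // level 0 = whole block, each level halves
    }
*/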
    10646 
    10647 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
    10648 {
    10649  const VkDeviceSize unusableSize = GetUnusableSize();
    10650 
    10651  outInfo.blockCount = 1;
    10652 
    10653  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
    10654  outInfo.usedBytes = outInfo.unusedBytes = 0;
    10655 
    10656  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
    10657  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
    10658  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
    10659 
    10660  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
    10661 
    10662  if(unusableSize > 0)
    10663  {
    10664  ++outInfo.unusedRangeCount;
    10665  outInfo.unusedBytes += unusableSize;
    10666  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
    10667  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
    10668  }
    10669 }
    10670 
    10671 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
    10672 {
    10673  const VkDeviceSize unusableSize = GetUnusableSize();
    10674 
    10675  inoutStats.size += GetSize();
    10676  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
    10677  inoutStats.allocationCount += m_AllocationCount;
    10678  inoutStats.unusedRangeCount += m_FreeCount;
    10679  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
    10680 
    10681  if(unusableSize > 0)
    10682  {
    10683  ++inoutStats.unusedRangeCount;
    10684  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
    10685  }
    10686 }
    10687 
    10688 #if VMA_STATS_STRING_ENABLED
    10689 
    10690 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
    10691 {
    10692  // TODO optimize
    10693  VmaStatInfo stat;
    10694  CalcAllocationStatInfo(stat);
    10695 
    10696  PrintDetailedMap_Begin(
    10697  json,
    10698  stat.unusedBytes,
    10699  stat.allocationCount,
    10700  stat.unusedRangeCount);
    10701 
    10702  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
    10703 
    10704  const VkDeviceSize unusableSize = GetUnusableSize();
    10705  if(unusableSize > 0)
    10706  {
    10707  PrintDetailedMap_UnusedRange(json,
    10708  m_UsableSize, // offset
    10709  unusableSize); // size
    10710  }
    10711 
    10712  PrintDetailedMap_End(json);
    10713 }
    10714 
    10715 #endif // #if VMA_STATS_STRING_ENABLED
    10716 
    10717 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
    10718  uint32_t currentFrameIndex,
    10719  uint32_t frameInUseCount,
    10720  VkDeviceSize bufferImageGranularity,
    10721  VkDeviceSize allocSize,
    10722  VkDeviceSize allocAlignment,
    10723  bool upperAddress,
    10724  VmaSuballocationType allocType,
    10725  bool canMakeOtherLost,
    10726  uint32_t strategy,
    10727  VmaAllocationRequest* pAllocationRequest)
    10728 {
    10729  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
    10730 
    10731  // Simple way to respect bufferImageGranularity. May be optimized some day.
    10732  // Whenever the allocation might hold an image with OPTIMAL tiling...
    10733  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
    10734  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
    10735  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
    10736  {
    10737  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
    10738  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
    10739  }
    10740 
    10741  if(allocSize > m_UsableSize)
    10742  {
    10743  return false;
    10744  }
    10745 
    10746  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10747  for(uint32_t level = targetLevel + 1; level--; )
    10748  {
    10749  for(Node* freeNode = m_FreeList[level].front;
    10750  freeNode != VMA_NULL;
    10751  freeNode = freeNode->free.next)
    10752  {
    10753  if(freeNode->offset % allocAlignment == 0)
    10754  {
    10755  pAllocationRequest->type = VmaAllocationRequestType::Normal;
    10756  pAllocationRequest->offset = freeNode->offset;
    10757  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
    10758  pAllocationRequest->sumItemSize = 0;
    10759  pAllocationRequest->itemsToMakeLostCount = 0;
    10760  pAllocationRequest->customData = (void*)(uintptr_t)level;
    10761  return true;
    10762  }
    10763  }
    10764  }
    10765 
    10766  return false;
    10767 }
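/*
Note on the loop idiom above, with a minimal example: the header
"for(uint32_t level = targetLevel + 1; level--; )" visits levels
targetLevel, targetLevel - 1, ..., 0 and terminates safely for an unsigned
counter, because the decrement happens in the condition before the body runs:

    for(uint32_t level = 3 + 1; level--; )
    {
        // body sees level = 3, 2, 1, 0; the wrapped value never reaches it
    }
*/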
    10768 
    10769 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
    10770  uint32_t currentFrameIndex,
    10771  uint32_t frameInUseCount,
    10772  VmaAllocationRequest* pAllocationRequest)
    10773 {
    10774  /*
    10775  Lost allocations are not supported in buddy allocator at the moment.
    10776  Support might be added in the future.
    10777  */
    10778  return pAllocationRequest->itemsToMakeLostCount == 0;
    10779 }
    10780 
    10781 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
    10782 {
    10783  /*
    10784  Lost allocations are not supported in buddy allocator at the moment.
    10785  Support might be added in the future.
    10786  */
    10787  return 0;
    10788 }
    10789 
    10790 void VmaBlockMetadata_Buddy::Alloc(
    10791  const VmaAllocationRequest& request,
    10792  VmaSuballocationType type,
    10793  VkDeviceSize allocSize,
    10794  VmaAllocation hAllocation)
    10795 {
    10796  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
    10797 
    10798  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
    10799  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
    10800 
    10801  Node* currNode = m_FreeList[currLevel].front;
    10802  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10803  while(currNode->offset != request.offset)
    10804  {
    10805  currNode = currNode->free.next;
    10806  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
    10807  }
    10808 
    10809  // Go down, splitting free nodes.
    10810  while(currLevel < targetLevel)
    10811  {
    10812  // currNode is already the first free node at currLevel.
    10813  // Remove it from the list of free nodes at this level.
    10814  RemoveFromFreeList(currLevel, currNode);
    10815 
    10816  const uint32_t childrenLevel = currLevel + 1;
    10817 
    10818  // Create two free sub-nodes.
    10819  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
    10820  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
    10821 
    10822  leftChild->offset = currNode->offset;
    10823  leftChild->type = Node::TYPE_FREE;
    10824  leftChild->parent = currNode;
    10825  leftChild->buddy = rightChild;
    10826 
    10827  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
    10828  rightChild->type = Node::TYPE_FREE;
    10829  rightChild->parent = currNode;
    10830  rightChild->buddy = leftChild;
    10831 
    10832  // Convert currNode to split type.
    10833  currNode->type = Node::TYPE_SPLIT;
    10834  currNode->split.leftChild = leftChild;
    10835 
    10836  // Add child nodes to free list. Order is important!
    10837  AddToFreeListFront(childrenLevel, rightChild);
    10838  AddToFreeListFront(childrenLevel, leftChild);
    10839 
    10840  ++m_FreeCount;
    10841  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
    10842  ++currLevel;
    10843  currNode = m_FreeList[currLevel].front;
    10844 
    10845  /*
    10846  We can be sure that currNode, as left child of node previously split,
    10847  also fulfills the alignment requirement.
    10848  */
    10849  }
    10850 
    10851  // Remove from free list.
    10852  VMA_ASSERT(currLevel == targetLevel &&
    10853  currNode != VMA_NULL &&
    10854  currNode->type == Node::TYPE_FREE);
    10855  RemoveFromFreeList(currLevel, currNode);
    10856 
    10857  // Convert to allocation node.
    10858  currNode->type = Node::TYPE_ALLOCATION;
    10859  currNode->allocation.alloc = hAllocation;
    10860 
    10861  ++m_AllocationCount;
    10862  --m_FreeCount;
    10863  m_SumFreeSize -= allocSize;
    10864 }
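/*
A worked example of the splitting loop above (hypothetical sizes): allocating
64 B when the only free node is the 512 B root at level 0, targetLevel = 3:

    512 B root -> split into two 256 B children (level 1)
    256 B left -> split into two 128 B children (level 2)
    128 B left -> split into two  64 B children (level 3)
     64 B left -> removed from the free list, marked TYPE_ALLOCATION

Each split pushes the right child and then the left child to the front of the
free list, so the left child - which inherits the parent's offset and thus
its alignment - is always found first on the next iteration.
*/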
    10865 
    10866 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
    10867 {
    10868  if(node->type == Node::TYPE_SPLIT)
    10869  {
    10870  DeleteNode(node->split.leftChild->buddy);
    10871  DeleteNode(node->split.leftChild);
    10872  }
    10873 
    10874  vma_delete(GetAllocationCallbacks(), node);
    10875 }
    10876 
    10877 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
    10878 {
    10879  VMA_VALIDATE(level < m_LevelCount);
    10880  VMA_VALIDATE(curr->parent == parent);
    10881  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
    10882  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
    10883  switch(curr->type)
    10884  {
    10885  case Node::TYPE_FREE:
    10886  // curr->free.prev, next are validated separately.
    10887  ctx.calculatedSumFreeSize += levelNodeSize;
    10888  ++ctx.calculatedFreeCount;
    10889  break;
    10890  case Node::TYPE_ALLOCATION:
    10891  ++ctx.calculatedAllocationCount;
    10892  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
    10893  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
    10894  break;
    10895  case Node::TYPE_SPLIT:
    10896  {
    10897  const uint32_t childrenLevel = level + 1;
    10898  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
    10899  const Node* const leftChild = curr->split.leftChild;
    10900  VMA_VALIDATE(leftChild != VMA_NULL);
    10901  VMA_VALIDATE(leftChild->offset == curr->offset);
    10902  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
    10903  {
    10904  VMA_VALIDATE(false && "ValidateNode for left child failed.");
    10905  }
    10906  const Node* const rightChild = leftChild->buddy;
    10907  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
    10908  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
    10909  {
    10910  VMA_VALIDATE(false && "ValidateNode for right child failed.");
    10911  }
    10912  }
    10913  break;
    10914  default:
    10915  return false;
    10916  }
    10917 
    10918  return true;
    10919 }
    10920 
    10921 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
    10922 {
    10923  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
    10924  uint32_t level = 0;
    10925  VkDeviceSize currLevelNodeSize = m_UsableSize;
    10926  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
    10927  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
    10928  {
    10929  ++level;
    10930  currLevelNodeSize = nextLevelNodeSize;
    10931  nextLevelNodeSize = currLevelNodeSize >> 1;
    10932  }
    10933  return level;
    10934 }
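/*
Worked example with hypothetical numbers: for m_UsableSize = 512 and
allocSize = 100, the loop walks 512 -> 256 -> 128 and stops once the next
level (64) would be too small, returning the deepest level that still fits:

    100 <= 256 -> level 1;  100 <= 128 -> level 2;  100 <= 64 fails
    result: level 2, node size 128
*/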
    10935 
    10936 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
    10937 {
    10938  // Find node and level.
    10939  Node* node = m_Root;
    10940  VkDeviceSize nodeOffset = 0;
    10941  uint32_t level = 0;
    10942  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
    10943  while(node->type == Node::TYPE_SPLIT)
    10944  {
    10945  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
    10946  if(offset < nodeOffset + nextLevelSize)
    10947  {
    10948  node = node->split.leftChild;
    10949  }
    10950  else
    10951  {
    10952  node = node->split.leftChild->buddy;
    10953  nodeOffset += nextLevelSize;
    10954  }
    10955  ++level;
    10956  levelNodeSize = nextLevelSize;
    10957  }
    10958 
    10959  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
    10960  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
    10961 
    10962  ++m_FreeCount;
    10963  --m_AllocationCount;
    10964  m_SumFreeSize += alloc->GetSize();
    10965 
    10966  node->type = Node::TYPE_FREE;
    10967 
    10968  // Join free nodes if possible.
    10969  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
    10970  {
    10971  RemoveFromFreeList(level, node->buddy);
    10972  Node* const parent = node->parent;
    10973 
    10974  vma_delete(GetAllocationCallbacks(), node->buddy);
    10975  vma_delete(GetAllocationCallbacks(), node);
    10976  parent->type = Node::TYPE_FREE;
    10977 
    10978  node = parent;
    10979  --level;
    10980  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
    10981  --m_FreeCount;
    10982  }
    10983 
    10984  AddToFreeListFront(level, node);
    10985 }
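/*
A brief illustration of the merge loop above: freeing a 64 B node whose buddy
is already free collapses the pair into their 128 B parent, and the process
repeats as long as the parent's buddy is free as well:

    before:  | freed 64 | free 64 |   free 128    |
    merge 1: |      free 128      |   free 128    |
    merge 2: |              free 256              |

Each iteration replaces two free children with one free parent, which is why
m_FreeCount is decremented once per merge.
*/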
    10986 
    10987 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
    10988 {
    10989  switch(node->type)
    10990  {
    10991  case Node::TYPE_FREE:
    10992  ++outInfo.unusedRangeCount;
    10993  outInfo.unusedBytes += levelNodeSize;
    10994  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
    10995  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
    10996  break;
    10997  case Node::TYPE_ALLOCATION:
    10998  {
    10999  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11000  ++outInfo.allocationCount;
    11001  outInfo.usedBytes += allocSize;
    11002  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
    11003  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
    11004 
    11005  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
    11006  if(unusedRangeSize > 0)
    11007  {
    11008  ++outInfo.unusedRangeCount;
    11009  outInfo.unusedBytes += unusedRangeSize;
    11010  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
    11011  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
    11012  }
    11013  }
    11014  break;
    11015  case Node::TYPE_SPLIT:
    11016  {
    11017  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11018  const Node* const leftChild = node->split.leftChild;
    11019  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
    11020  const Node* const rightChild = leftChild->buddy;
    11021  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
    11022  }
    11023  break;
    11024  default:
    11025  VMA_ASSERT(0);
    11026  }
    11027 }
    11028 
    11029 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
    11030 {
    11031  VMA_ASSERT(node->type == Node::TYPE_FREE);
    11032 
    11033  // List is empty.
    11034  Node* const frontNode = m_FreeList[level].front;
    11035  if(frontNode == VMA_NULL)
    11036  {
    11037  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
    11038  node->free.prev = node->free.next = VMA_NULL;
    11039  m_FreeList[level].front = m_FreeList[level].back = node;
    11040  }
    11041  else
    11042  {
    11043  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
    11044  node->free.prev = VMA_NULL;
    11045  node->free.next = frontNode;
    11046  frontNode->free.prev = node;
    11047  m_FreeList[level].front = node;
    11048  }
    11049 }
    11050 
    11051 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
    11052 {
    11053  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
    11054 
    11055  // It is at the front.
    11056  if(node->free.prev == VMA_NULL)
    11057  {
    11058  VMA_ASSERT(m_FreeList[level].front == node);
    11059  m_FreeList[level].front = node->free.next;
    11060  }
    11061  else
    11062  {
    11063  Node* const prevFreeNode = node->free.prev;
    11064  VMA_ASSERT(prevFreeNode->free.next == node);
    11065  prevFreeNode->free.next = node->free.next;
    11066  }
    11067 
    11068  // It is at the back.
    11069  if(node->free.next == VMA_NULL)
    11070  {
    11071  VMA_ASSERT(m_FreeList[level].back == node);
    11072  m_FreeList[level].back = node->free.prev;
    11073  }
    11074  else
    11075  {
    11076  Node* const nextFreeNode = node->free.next;
    11077  VMA_ASSERT(nextFreeNode->free.prev == node);
    11078  nextFreeNode->free.prev = node->free.prev;
    11079  }
    11080 }
    11081 
    11082 #if VMA_STATS_STRING_ENABLED
    11083 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
    11084 {
    11085  switch(node->type)
    11086  {
    11087  case Node::TYPE_FREE:
    11088  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
    11089  break;
    11090  case Node::TYPE_ALLOCATION:
    11091  {
    11092  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
    11093  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
    11094  if(allocSize < levelNodeSize)
    11095  {
    11096  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
    11097  }
    11098  }
    11099  break;
    11100  case Node::TYPE_SPLIT:
    11101  {
    11102  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
    11103  const Node* const leftChild = node->split.leftChild;
    11104  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
    11105  const Node* const rightChild = leftChild->buddy;
    11106  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
    11107  }
    11108  break;
    11109  default:
    11110  VMA_ASSERT(0);
    11111  }
    11112 }
    11113 #endif // #if VMA_STATS_STRING_ENABLED
    11114 
    11115 
    11116 ////////////////////////////////////////////////////////////////////////////////
    11117 // class VmaDeviceMemoryBlock
    11118 
    11119 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
    11120  m_pMetadata(VMA_NULL),
    11121  m_MemoryTypeIndex(UINT32_MAX),
    11122  m_Id(0),
    11123  m_hMemory(VK_NULL_HANDLE),
    11124  m_MapCount(0),
    11125  m_pMappedData(VMA_NULL)
    11126 {
    11127 }
    11128 
    11129 void VmaDeviceMemoryBlock::Init(
    11130  VmaAllocator hAllocator,
    11131  VmaPool hParentPool,
    11132  uint32_t newMemoryTypeIndex,
    11133  VkDeviceMemory newMemory,
    11134  VkDeviceSize newSize,
    11135  uint32_t id,
    11136  uint32_t algorithm)
    11137 {
    11138  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
    11139 
    11140  m_hParentPool = hParentPool;
    11141  m_MemoryTypeIndex = newMemoryTypeIndex;
    11142  m_Id = id;
    11143  m_hMemory = newMemory;
    11144 
    11145  switch(algorithm)
    11146  {
    11147  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
    11148  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
    11149  break;
    11150  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
    11151  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
    11152  break;
    11153  default:
    11154  VMA_ASSERT(0);
    11155  // Fall-through.
    11156  case 0:
    11157  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
    11158  }
    11159  m_pMetadata->Init(newSize);
    11160 }
    11161 
    11162 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
    11163 {
    11164  // This is the most important assert in the entire library.
    11165  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
    11166  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
    11167 
    11168  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
    11169  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
    11170  m_hMemory = VK_NULL_HANDLE;
    11171 
    11172  vma_delete(allocator, m_pMetadata);
    11173  m_pMetadata = VMA_NULL;
    11174 }
    11175 
    11176 bool VmaDeviceMemoryBlock::Validate() const
    11177 {
    11178  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
    11179  (m_pMetadata->GetSize() != 0));
    11180 
    11181  return m_pMetadata->Validate();
    11182 }
    11183 
    11184 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
    11185 {
    11186  void* pData = nullptr;
    11187  VkResult res = Map(hAllocator, 1, &pData);
    11188  if(res != VK_SUCCESS)
    11189  {
    11190  return res;
    11191  }
    11192 
    11193  res = m_pMetadata->CheckCorruption(pData);
    11194 
    11195  Unmap(hAllocator, 1);
    11196 
    11197  return res;
    11198 }
    11199 
    11200 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
    11201 {
    11202  if(count == 0)
    11203  {
    11204  return VK_SUCCESS;
    11205  }
    11206 
    11207  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11208  if(m_MapCount != 0)
    11209  {
    11210  m_MapCount += count;
    11211  VMA_ASSERT(m_pMappedData != VMA_NULL);
    11212  if(ppData != VMA_NULL)
    11213  {
    11214  *ppData = m_pMappedData;
    11215  }
    11216  return VK_SUCCESS;
    11217  }
    11218  else
    11219  {
    11220  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
    11221  hAllocator->m_hDevice,
    11222  m_hMemory,
    11223  0, // offset
    11224  VK_WHOLE_SIZE,
    11225  0, // flags
    11226  &m_pMappedData);
    11227  if(result == VK_SUCCESS)
    11228  {
    11229  if(ppData != VMA_NULL)
    11230  {
    11231  *ppData = m_pMappedData;
    11232  }
    11233  m_MapCount = count;
    11234  }
    11235  return result;
    11236  }
    11237 }
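/*
Usage sketch (hedged, hypothetical handles): mapping is reference-counted per
block, so nested Map/Unmap pairs are cheap - only the first Map issues
vkMapMemory and only the last Unmap issues vkUnmapMemory:

    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    block->Map(hAllocator, 1, &p1);   // count 0 -> 1: real vkMapMemory call
    block->Map(hAllocator, 1, &p2);   // count 1 -> 2: cached pointer, p2 == p1
    block->Unmap(hAllocator, 1);      // count 2 -> 1: no Vulkan call
    block->Unmap(hAllocator, 1);      // count 1 -> 0: real vkUnmapMemory call
*/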
    11238 
    11239 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
    11240 {
    11241  if(count == 0)
    11242  {
    11243  return;
    11244  }
    11245 
    11246  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11247  if(m_MapCount >= count)
    11248  {
    11249  m_MapCount -= count;
    11250  if(m_MapCount == 0)
    11251  {
    11252  m_pMappedData = VMA_NULL;
    11253  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
    11254  }
    11255  }
    11256  else
    11257  {
    11258  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
    11259  }
    11260 }
    11261 
    11262 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11263 {
    11264  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11265  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11266 
    11267  void* pData;
    11268  VkResult res = Map(hAllocator, 1, &pData);
    11269  if(res != VK_SUCCESS)
    11270  {
    11271  return res;
    11272  }
    11273 
    11274  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
    11275  VmaWriteMagicValue(pData, allocOffset + allocSize);
    11276 
    11277  Unmap(hAllocator, 1);
    11278 
    11279  return VK_SUCCESS;
    11280 }
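/*
Memory layout assumed by the two writes above (requires VMA_DEBUG_MARGIN > 0):

    | ... | margin | allocation payload | margin | ... |
             ^ magic at                  ^ magic at
             allocOffset - VMA_DEBUG_MARGIN        allocOffset + allocSize

ValidateMagicValueAroundAllocation() below re-reads both locations when the
allocation is freed; an overrun or underrun into either margin trips the
corresponding assert.
*/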
    11281 
    11282 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
    11283 {
    11284  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
    11285  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
    11286 
    11287  void* pData;
    11288  VkResult res = Map(hAllocator, 1, &pData);
    11289  if(res != VK_SUCCESS)
    11290  {
    11291  return res;
    11292  }
    11293 
    11294  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
    11295  {
    11296  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
    11297  }
    11298  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
    11299  {
    11300  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
    11301  }
    11302 
    11303  Unmap(hAllocator, 1);
    11304 
    11305  return VK_SUCCESS;
    11306 }
    11307 
    11308 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
    11309  const VmaAllocator hAllocator,
    11310  const VmaAllocation hAllocation,
    11311  VkBuffer hBuffer)
    11312 {
    11313  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11314  hAllocation->GetBlock() == this);
    11315  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11316  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11317  return hAllocator->GetVulkanFunctions().vkBindBufferMemory(
    11318  hAllocator->m_hDevice,
    11319  hBuffer,
    11320  m_hMemory,
    11321  hAllocation->GetOffset());
    11322 }
    11323 
    11324 VkResult VmaDeviceMemoryBlock::BindImageMemory(
    11325  const VmaAllocator hAllocator,
    11326  const VmaAllocation hAllocation,
    11327  VkImage hImage)
    11328 {
    11329  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
    11330  hAllocation->GetBlock() == this);
    11331  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
    11332  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
    11333  return hAllocator->GetVulkanFunctions().vkBindImageMemory(
    11334  hAllocator->m_hDevice,
    11335  hImage,
    11336  m_hMemory,
    11337  hAllocation->GetOffset());
    11338 }
    11339 
    11340 static void InitStatInfo(VmaStatInfo& outInfo)
    11341 {
    11342  memset(&outInfo, 0, sizeof(outInfo));
    11343  outInfo.allocationSizeMin = UINT64_MAX;
    11344  outInfo.unusedRangeSizeMin = UINT64_MAX;
    11345 }
    11346 
    11347 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
    11348 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
    11349 {
    11350  inoutInfo.blockCount += srcInfo.blockCount;
    11351  inoutInfo.allocationCount += srcInfo.allocationCount;
    11352  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
    11353  inoutInfo.usedBytes += srcInfo.usedBytes;
    11354  inoutInfo.unusedBytes += srcInfo.unusedBytes;
    11355  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
    11356  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
    11357  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
    11358  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
    11359 }
    11360 
    11361 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
    11362 {
    11363  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
    11364  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
    11365  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
    11366  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
    11367 }
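/*
Small numeric example (assuming VmaRoundDiv performs round-to-nearest integer
division, as its name suggests): with usedBytes = 10 and allocationCount = 4,
allocationSizeAvg becomes VmaRoundDiv(10, 4) = 3 rather than the truncated
10 / 4 = 2.
*/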
    11368 
    11369 VmaPool_T::VmaPool_T(
    11370  VmaAllocator hAllocator,
    11371  const VmaPoolCreateInfo& createInfo,
    11372  VkDeviceSize preferredBlockSize) :
    11373  m_BlockVector(
    11374  hAllocator,
    11375  this, // hParentPool
    11376  createInfo.memoryTypeIndex,
    11377  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
    11378  createInfo.minBlockCount,
    11379  createInfo.maxBlockCount,
    11380  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
    11381  createInfo.frameInUseCount,
    11382  true, // isCustomPool
    11383  createInfo.blockSize != 0, // explicitBlockSize
    11384  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
    11385  m_Id(0)
    11386 {
    11387 }
    11388 
    11389 VmaPool_T::~VmaPool_T()
    11390 {
    11391 }
    11392 
    11393 #if VMA_STATS_STRING_ENABLED
    11394 
    11395 #endif // #if VMA_STATS_STRING_ENABLED
    11396 
    11397 VmaBlockVector::VmaBlockVector(
    11398  VmaAllocator hAllocator,
    11399  VmaPool hParentPool,
    11400  uint32_t memoryTypeIndex,
    11401  VkDeviceSize preferredBlockSize,
    11402  size_t minBlockCount,
    11403  size_t maxBlockCount,
    11404  VkDeviceSize bufferImageGranularity,
    11405  uint32_t frameInUseCount,
    11406  bool isCustomPool,
    11407  bool explicitBlockSize,
    11408  uint32_t algorithm) :
    11409  m_hAllocator(hAllocator),
    11410  m_hParentPool(hParentPool),
    11411  m_MemoryTypeIndex(memoryTypeIndex),
    11412  m_PreferredBlockSize(preferredBlockSize),
    11413  m_MinBlockCount(minBlockCount),
    11414  m_MaxBlockCount(maxBlockCount),
    11415  m_BufferImageGranularity(bufferImageGranularity),
    11416  m_FrameInUseCount(frameInUseCount),
    11417  m_IsCustomPool(isCustomPool),
    11418  m_ExplicitBlockSize(explicitBlockSize),
    11419  m_Algorithm(algorithm),
    11420  m_HasEmptyBlock(false),
    11421  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
    11422  m_NextBlockId(0)
    11423 {
    11424 }
    11425 
    11426 VmaBlockVector::~VmaBlockVector()
    11427 {
    11428  for(size_t i = m_Blocks.size(); i--; )
    11429  {
    11430  m_Blocks[i]->Destroy(m_hAllocator);
    11431  vma_delete(m_hAllocator, m_Blocks[i]);
    11432  }
    11433 }
    11434 
    11435 VkResult VmaBlockVector::CreateMinBlocks()
    11436 {
    11437  for(size_t i = 0; i < m_MinBlockCount; ++i)
    11438  {
    11439  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
    11440  if(res != VK_SUCCESS)
    11441  {
    11442  return res;
    11443  }
    11444  }
    11445  return VK_SUCCESS;
    11446 }
    11447 
    11448 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
    11449 {
    11450  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    11451 
    11452  const size_t blockCount = m_Blocks.size();
    11453 
    11454  pStats->size = 0;
    11455  pStats->unusedSize = 0;
    11456  pStats->allocationCount = 0;
    11457  pStats->unusedRangeCount = 0;
    11458  pStats->unusedRangeSizeMax = 0;
    11459  pStats->blockCount = blockCount;
    11460 
    11461  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    11462  {
    11463  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    11464  VMA_ASSERT(pBlock);
    11465  VMA_HEAVY_ASSERT(pBlock->Validate());
    11466  pBlock->m_pMetadata->AddPoolStats(*pStats);
    11467  }
    11468 }
    11469 
    11470 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
    11471 {
    11472  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    11473  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
    11474  (VMA_DEBUG_MARGIN > 0) &&
    11475  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
    11476  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
    11477 }
    11478 
    11479 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
    11480 
    11481 VkResult VmaBlockVector::Allocate(
    11482  uint32_t currentFrameIndex,
    11483  VkDeviceSize size,
    11484  VkDeviceSize alignment,
    11485  const VmaAllocationCreateInfo& createInfo,
    11486  VmaSuballocationType suballocType,
    11487  size_t allocationCount,
    11488  VmaAllocation* pAllocations)
    11489 {
    11490  size_t allocIndex;
    11491  VkResult res = VK_SUCCESS;
    11492 
    11493  if(IsCorruptionDetectionEnabled())
    11494  {
    11495  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11496  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
    11497  }
    11498 
    11499  {
    11500  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11501  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    11502  {
    11503  res = AllocatePage(
    11504  currentFrameIndex,
    11505  size,
    11506  alignment,
    11507  createInfo,
    11508  suballocType,
    11509  pAllocations + allocIndex);
    11510  if(res != VK_SUCCESS)
    11511  {
    11512  break;
    11513  }
    11514  }
    11515  }
    11516 
    11517  if(res != VK_SUCCESS)
    11518  {
    11519  // Free all already created allocations.
    11520  while(allocIndex--)
    11521  {
    11522  Free(pAllocations[allocIndex]);
    11523  }
    11524  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    11525  }
    11526 
    11527  return res;
    11528 }
    11529 
    11530 VkResult VmaBlockVector::AllocatePage(
    11531  uint32_t currentFrameIndex,
    11532  VkDeviceSize size,
    11533  VkDeviceSize alignment,
    11534  const VmaAllocationCreateInfo& createInfo,
    11535  VmaSuballocationType suballocType,
    11536  VmaAllocation* pAllocation)
    11537 {
    11538  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    11539  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
    11540  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    11541  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    11542  const bool canCreateNewBlock =
    11543  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
    11544  (m_Blocks.size() < m_MaxBlockCount);
    11545  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
    11546 
    11547  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
    11548  // Which in turn is available only when maxBlockCount = 1.
    11549  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
    11550  {
    11551  canMakeOtherLost = false;
    11552  }
    11553 
    11554  // Upper address can only be used with linear allocator and within single memory block.
    11555  if(isUpperAddress &&
    11556  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
    11557  {
    11558  return VK_ERROR_FEATURE_NOT_PRESENT;
    11559  }
    11560 
    11561  // Validate strategy.
    11562  switch(strategy)
    11563  {
    11564  case 0:
    11565  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
    11566  break;
    11567  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
    11568  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
    11569  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
    11570  break;
    11571  default:
    11572  return VK_ERROR_FEATURE_NOT_PRESENT;
    11573  }
    11574 
    11575  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
    11576  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
    11577  {
    11578  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11579  }
    11580 
    11581  /*
    11582  Under certain conditions, this whole section can be skipped as an optimization, so
    11583  we move on directly to trying to allocate with canMakeOtherLost. That's the case
    11584  e.g. for custom pools with linear algorithm.
    11585  */
    11586  if(!canMakeOtherLost || canCreateNewBlock)
    11587  {
    11588  // 1. Search existing allocations. Try to allocate without making other allocations lost.
    11589  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
    11590  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
    11591 
    11592  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11593  {
    11594  // Use only last block.
    11595  if(!m_Blocks.empty())
    11596  {
    11597  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
    11598  VMA_ASSERT(pCurrBlock);
    11599  VkResult res = AllocateFromBlock(
    11600  pCurrBlock,
    11601  currentFrameIndex,
    11602  size,
    11603  alignment,
    11604  allocFlagsCopy,
    11605  createInfo.pUserData,
    11606  suballocType,
    11607  strategy,
    11608  pAllocation);
    11609  if(res == VK_SUCCESS)
    11610  {
    11611  VMA_DEBUG_LOG(" Returned from last block #%u", (uint32_t)(m_Blocks.size() - 1));
    11612  return VK_SUCCESS;
    11613  }
    11614  }
    11615  }
    11616  else
    11617  {
    11618  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11619  {
    11620  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11621  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11622  {
    11623  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11624  VMA_ASSERT(pCurrBlock);
    11625  VkResult res = AllocateFromBlock(
    11626  pCurrBlock,
    11627  currentFrameIndex,
    11628  size,
    11629  alignment,
    11630  allocFlagsCopy,
    11631  createInfo.pUserData,
    11632  suballocType,
    11633  strategy,
    11634  pAllocation);
    11635  if(res == VK_SUCCESS)
    11636  {
    11637  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11638  return VK_SUCCESS;
    11639  }
    11640  }
    11641  }
    11642  else // WORST_FIT, FIRST_FIT
    11643  {
    11644  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11645  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11646  {
    11647  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11648  VMA_ASSERT(pCurrBlock);
    11649  VkResult res = AllocateFromBlock(
    11650  pCurrBlock,
    11651  currentFrameIndex,
    11652  size,
    11653  alignment,
    11654  allocFlagsCopy,
    11655  createInfo.pUserData,
    11656  suballocType,
    11657  strategy,
    11658  pAllocation);
    11659  if(res == VK_SUCCESS)
    11660  {
    11661  VMA_DEBUG_LOG(" Returned from existing block #%u", (uint32_t)blockIndex);
    11662  return VK_SUCCESS;
    11663  }
    11664  }
    11665  }
    11666  }
    11667 
    11668  // 2. Try to create new block.
    11669  if(canCreateNewBlock)
    11670  {
    11671  // Calculate optimal size for new block.
    11672  VkDeviceSize newBlockSize = m_PreferredBlockSize;
    11673  uint32_t newBlockSizeShift = 0;
    11674  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
    11675 
    11676  if(!m_ExplicitBlockSize)
    11677  {
    11678  // Allocate 1/8, 1/4, 1/2 as first blocks.
    11679  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
    11680  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
    11681  {
    11682  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11683  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
    11684  {
    11685  newBlockSize = smallerNewBlockSize;
    11686  ++newBlockSizeShift;
    11687  }
    11688  else
    11689  {
    11690  break;
    11691  }
    11692  }
    11693  }
    11694 
    11695  size_t newBlockIndex = 0;
    11696  VkResult res = CreateBlock(newBlockSize, &newBlockIndex);
    11697  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
    11698  if(!m_ExplicitBlockSize)
    11699  {
    11700  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
    11701  {
    11702  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
    11703  if(smallerNewBlockSize >= size)
    11704  {
    11705  newBlockSize = smallerNewBlockSize;
    11706  ++newBlockSizeShift;
    11707  res = CreateBlock(newBlockSize, &newBlockIndex);
    11708  }
    11709  else
    11710  {
    11711  break;
    11712  }
    11713  }
    11714  }
    11715 
    11716  if(res == VK_SUCCESS)
    11717  {
    11718  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
    11719  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
    11720 
    11721  res = AllocateFromBlock(
    11722  pBlock,
    11723  currentFrameIndex,
    11724  size,
    11725  alignment,
    11726  allocFlagsCopy,
    11727  createInfo.pUserData,
    11728  suballocType,
    11729  strategy,
    11730  pAllocation);
    11731  if(res == VK_SUCCESS)
    11732  {
    11733  VMA_DEBUG_LOG(" Created new block Size=%llu", newBlockSize);
    11734  return VK_SUCCESS;
    11735  }
    11736  else
    11737  {
    11738  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
    11739  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11740  }
    11741  }
    11742  }
    11743  }
    11744 
    11745  // 3. Try to allocate from existing blocks with making other allocations lost.
    11746  if(canMakeOtherLost)
    11747  {
    11748  uint32_t tryIndex = 0;
    11749  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
    11750  {
    11751  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
    11752  VmaAllocationRequest bestRequest = {};
    11753  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
    11754 
    11755  // 1. Search existing allocations.
    11756  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
    11757  {
    11758  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
    11759  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
    11760  {
    11761  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11762  VMA_ASSERT(pCurrBlock);
    11763  VmaAllocationRequest currRequest = {};
    11764  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11765  currentFrameIndex,
    11766  m_FrameInUseCount,
    11767  m_BufferImageGranularity,
    11768  size,
    11769  alignment,
    11770  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11771  suballocType,
    11772  canMakeOtherLost,
    11773  strategy,
    11774  &currRequest))
    11775  {
    11776  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11777  if(pBestRequestBlock == VMA_NULL ||
    11778  currRequestCost < bestRequestCost)
    11779  {
    11780  pBestRequestBlock = pCurrBlock;
    11781  bestRequest = currRequest;
    11782  bestRequestCost = currRequestCost;
    11783 
    11784  if(bestRequestCost == 0)
    11785  {
    11786  break;
    11787  }
    11788  }
    11789  }
    11790  }
    11791  }
    11792  else // WORST_FIT, FIRST_FIT
    11793  {
    11794  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
    11795  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    11796  {
    11797  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
    11798  VMA_ASSERT(pCurrBlock);
    11799  VmaAllocationRequest currRequest = {};
    11800  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
    11801  currentFrameIndex,
    11802  m_FrameInUseCount,
    11803  m_BufferImageGranularity,
    11804  size,
    11805  alignment,
    11806  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
    11807  suballocType,
    11808  canMakeOtherLost,
    11809  strategy,
    11810  &currRequest))
    11811  {
    11812  const VkDeviceSize currRequestCost = currRequest.CalcCost();
    11813  if(pBestRequestBlock == VMA_NULL ||
    11814  currRequestCost < bestRequestCost ||
    11815  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11816  {
    11817  pBestRequestBlock = pCurrBlock;
    11818  bestRequest = currRequest;
    11819  bestRequestCost = currRequestCost;
    11820 
    11821  if(bestRequestCost == 0 ||
    11822  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
    11823  {
    11824  break;
    11825  }
    11826  }
    11827  }
    11828  }
    11829  }
    11830 
    11831  if(pBestRequestBlock != VMA_NULL)
    11832  {
    11833  if(mapped)
    11834  {
    11835  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
    11836  if(res != VK_SUCCESS)
    11837  {
    11838  return res;
    11839  }
    11840  }
    11841 
    11842  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
    11843  currentFrameIndex,
    11844  m_FrameInUseCount,
    11845  &bestRequest))
    11846  {
    11847  // We no longer have an empty Allocation.
    11848  if(pBestRequestBlock->m_pMetadata->IsEmpty())
    11849  {
    11850  m_HasEmptyBlock = false;
    11851  }
    11852  // Allocate from this pBlock.
    11853  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    11854  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    11855  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
    11856  (*pAllocation)->InitBlockAllocation(
    11857  pBestRequestBlock,
    11858  bestRequest.offset,
    11859  alignment,
    11860  size,
    11861  suballocType,
    11862  mapped,
    11863  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    11864  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
    11865  VMA_DEBUG_LOG(" Returned from existing block");
    11866  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
    11867  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    11868  {
    11869  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    11870  }
    11871  if(IsCorruptionDetectionEnabled())
    11872  {
    11873  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
    11874  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    11875  }
    11876  return VK_SUCCESS;
    11877  }
    11878  // else: Some allocations must have been touched while we are here. Next try.
    11879  }
    11880  else
    11881  {
    11882  // Could not find place in any of the blocks - break outer loop.
    11883  break;
    11884  }
    11885  }
    11886  /* Maximum number of tries exceeded - a very unlikely event: many other
    11887  threads were simultaneously touching allocations, making it impossible to
    11888  make them lost at the same time as we tried to allocate. */
    11889  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
    11890  {
    11891  return VK_ERROR_TOO_MANY_OBJECTS;
    11892  }
    11893  }
    11894 
    11895  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    11896 }
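/*
A numeric example of the "1/8, 1/4, 1/2" block sizing above (hypothetical
figures): with m_PreferredBlockSize = 256 MiB, no blocks allocated yet and a
1 MiB request, the first loop halves the candidate size three times
(256 -> 128 -> 64 -> 32 MiB), since each smaller size still exceeds both the
largest existing block (none) and twice the request. The pool thus starts
with a 32 MiB block and grows toward 256 MiB as demand increases. The second
halving loop covers the opposite case: if vkAllocateMemory fails while some
of the three shifts remain unused, progressively smaller blocks - never
smaller than the request - are tried before giving up.
*/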
    11897 
    11898 void VmaBlockVector::Free(
    11899  VmaAllocation hAllocation)
    11900 {
    11901  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
    11902 
    11903  // Scope for lock.
    11904  {
    11905  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    11906 
    11907  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    11908 
    11909  if(IsCorruptionDetectionEnabled())
    11910  {
    11911  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
    11912  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
    11913  }
    11914 
    11915  if(hAllocation->IsPersistentMap())
    11916  {
    11917  pBlock->Unmap(m_hAllocator, 1);
    11918  }
    11919 
    11920  pBlock->m_pMetadata->Free(hAllocation);
    11921  VMA_HEAVY_ASSERT(pBlock->Validate());
    11922 
    11923  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
    11924 
    11925  // pBlock became empty after this deallocation.
    11926  if(pBlock->m_pMetadata->IsEmpty())
    11927  {
    11928  // Already have an empty block. We don't want two, so delete this one.
    11929  if(m_HasEmptyBlock && m_Blocks.size() > m_MinBlockCount)
    11930  {
    11931  pBlockToDelete = pBlock;
    11932  Remove(pBlock);
    11933  }
    11934  // We now have the first empty block.
    11935  else
    11936  {
    11937  m_HasEmptyBlock = true;
    11938  }
    11939  }
    11940  // pBlock didn't become empty, but we have another empty block - find and free that one.
    11942  // (This is an optional heuristic.)
    11942  else if(m_HasEmptyBlock)
    11943  {
    11944  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
    11945  if(pLastBlock->m_pMetadata->IsEmpty() && m_Blocks.size() > m_MinBlockCount)
    11946  {
    11947  pBlockToDelete = pLastBlock;
    11948  m_Blocks.pop_back();
    11949  m_HasEmptyBlock = false;
    11950  }
    11951  }
    11952 
    11953  IncrementallySortBlocks();
    11954  }
    11955 
    11956  // Destruction of a free Allocation. Deferred until this point, outside of mutex
    11957  // lock, for performance reasons.
    11958  if(pBlockToDelete != VMA_NULL)
    11959  {
    11960  VMA_DEBUG_LOG(" Deleted empty allocation");
    11961  pBlockToDelete->Destroy(m_hAllocator);
    11962  vma_delete(m_hAllocator, pBlockToDelete);
    11963  }
    11964 }
    11965 
    11966 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
    11967 {
    11968  VkDeviceSize result = 0;
    11969  for(size_t i = m_Blocks.size(); i--; )
    11970  {
    11971  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
    11972  if(result >= m_PreferredBlockSize)
    11973  {
    11974  break;
    11975  }
    11976  }
    11977  return result;
    11978 }
    11979 
    11980 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
    11981 {
    11982  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    11983  {
    11984  if(m_Blocks[blockIndex] == pBlock)
    11985  {
    11986  VmaVectorRemove(m_Blocks, blockIndex);
    11987  return;
    11988  }
    11989  }
    11990  VMA_ASSERT(0);
    11991 }
    11992 
    11993 void VmaBlockVector::IncrementallySortBlocks()
    11994 {
    11995  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
    11996  {
    11997  // Bubble sort only until first swap.
    11998  for(size_t i = 1; i < m_Blocks.size(); ++i)
    11999  {
    12000  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
    12001  {
    12002  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
    12003  return;
    12004  }
    12005  }
    12006  }
    12007 }
    12008 
    12009 VkResult VmaBlockVector::AllocateFromBlock(
    12010  VmaDeviceMemoryBlock* pBlock,
    12011  uint32_t currentFrameIndex,
    12012  VkDeviceSize size,
    12013  VkDeviceSize alignment,
    12014  VmaAllocationCreateFlags allocFlags,
    12015  void* pUserData,
    12016  VmaSuballocationType suballocType,
    12017  uint32_t strategy,
    12018  VmaAllocation* pAllocation)
    12019 {
    12020  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
    12021  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
    12022  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    12023  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
    12024 
    12025  VmaAllocationRequest currRequest = {};
    12026  if(pBlock->m_pMetadata->CreateAllocationRequest(
    12027  currentFrameIndex,
    12028  m_FrameInUseCount,
    12029  m_BufferImageGranularity,
    12030  size,
    12031  alignment,
    12032  isUpperAddress,
    12033  suballocType,
    12034  false, // canMakeOtherLost
    12035  strategy,
    12036  &currRequest))
    12037  {
    12038  // Allocate from pCurrBlock.
    12039  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
    12040 
    12041  if(mapped)
    12042  {
    12043  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
    12044  if(res != VK_SUCCESS)
    12045  {
    12046  return res;
    12047  }
    12048  }
    12049 
    12050  // We no longer have an empty Allocation.
    12051  if(pBlock->m_pMetadata->IsEmpty())
    12052  {
    12053  m_HasEmptyBlock = false;
    12054  }
    12055 
    12056  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate();
    12057  (*pAllocation)->Ctor(currentFrameIndex, isUserDataString);
    12058  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
    12059  (*pAllocation)->InitBlockAllocation(
    12060  pBlock,
    12061  currRequest.offset,
    12062  alignment,
    12063  size,
    12064  suballocType,
    12065  mapped,
    12066  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
    12067  VMA_HEAVY_ASSERT(pBlock->Validate());
    12068  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
    12069  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    12070  {
    12071  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    12072  }
    12073  if(IsCorruptionDetectionEnabled())
    12074  {
    12075  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
    12076  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
    12077  }
    12078  return VK_SUCCESS;
    12079  }
    12080  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    12081 }
    12082 
    12083 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
    12084 {
    12085  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    12086  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
    12087  allocInfo.allocationSize = blockSize;
    12088  VkDeviceMemory mem = VK_NULL_HANDLE;
    12089  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
    12090  if(res < 0)
    12091  {
    12092  return res;
    12093  }
    12094 
    12095  // New VkDeviceMemory successfully created.
    12096 
    12097  // Create new Allocation for it.
    12098  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
    12099  pBlock->Init(
    12100  m_hAllocator,
    12101  m_hParentPool,
    12102  m_MemoryTypeIndex,
    12103  mem,
    12104  allocInfo.allocationSize,
    12105  m_NextBlockId++,
    12106  m_Algorithm);
    12107 
    12108  m_Blocks.push_back(pBlock);
    12109  if(pNewBlockIndex != VMA_NULL)
    12110  {
    12111  *pNewBlockIndex = m_Blocks.size() - 1;
    12112  }
    12113 
    12114  return VK_SUCCESS;
    12115 }
    12116 
    12117 void VmaBlockVector::ApplyDefragmentationMovesCpu(
    12118  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12119  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
    12120 {
    12121  const size_t blockCount = m_Blocks.size();
    12122  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
    12123 
    12124  enum BLOCK_FLAG
    12125  {
    12126  BLOCK_FLAG_USED = 0x00000001,
    12127  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
    12128  };
    12129 
    12130  struct BlockInfo
    12131  {
    12132  uint32_t flags;
    12133  void* pMappedData;
    12134  };
    12135  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
    12136  blockInfo(blockCount, VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
    12137  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
    12138 
    12139  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12140  const size_t moveCount = moves.size();
    12141  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12142  {
    12143  const VmaDefragmentationMove& move = moves[moveIndex];
    12144  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
    12145  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
    12146  }
    12147 
    12148  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12149 
    12150  // Go over all blocks. Get mapped pointer or map if necessary.
    12151  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12152  {
    12153  BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12154  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12155  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
    12156  {
    12157  currBlockInfo.pMappedData = pBlock->GetMappedData();
    12158  // If it was not mapped before defragmentation, map it now.
    12159  if(currBlockInfo.pMappedData == VMA_NULL)
    12160  {
    12161  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
    12162  if(pDefragCtx->res == VK_SUCCESS)
    12163  {
    12164  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
    12165  }
    12166  }
    12167  }
    12168  }
    12169 
    12170  // Go over all moves. Do actual data transfer.
    12171  if(pDefragCtx->res == VK_SUCCESS)
    12172  {
    12173  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12174  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12175 
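    // vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges require offsets and
    // sizes aligned to nonCoherentAtomSize, so each range below is widened to atom
    // boundaries and clamped to the block size. E.g. with nonCoherentAtomSize = 64,
    // a move at srcOffset = 100 of size 40 becomes the range [64, 192): the offset
    // is aligned down to 64 and the size 40 + (100 - 64) = 76 is aligned up to 128.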
    12176  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12177  {
    12178  const VmaDefragmentationMove& move = moves[moveIndex];
    12179 
    12180  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
    12181  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
    12182 
    12183  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
    12184 
    12185  // Invalidate source.
    12186  if(isNonCoherent)
    12187  {
    12188  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
    12189  memRange.memory = pSrcBlock->GetDeviceMemory();
    12190  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
    12191  memRange.size = VMA_MIN(
    12192  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
    12193  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
    12194  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12195  }
    12196 
    12197  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
    12198  memmove(
    12199  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
    12200  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
    12201  static_cast<size_t>(move.size));
    12202 
    12203  if(IsCorruptionDetectionEnabled())
    12204  {
    12205  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
    12206  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
    12207  }
    12208 
    12209  // Flush destination.
    12210  if(isNonCoherent)
    12211  {
    12212  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
    12213  memRange.memory = pDstBlock->GetDeviceMemory();
    12214  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
    12215  memRange.size = VMA_MIN(
    12216  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
    12217  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
    12218  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
    12219  }
    12220  }
    12221  }
    12222 
    12223  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
    12224  // Regardless of pCtx->res == VK_SUCCESS.
    12225  for(size_t blockIndex = blockCount; blockIndex--; )
    12226  {
    12227  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
    12228  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
    12229  {
    12230  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12231  pBlock->Unmap(m_hAllocator, 1);
    12232  }
    12233  }
    12234 }
    12235 
    12236 void VmaBlockVector::ApplyDefragmentationMovesGpu(
    12237  class VmaBlockVectorDefragmentationContext* pDefragCtx,
    12238  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12239  VkCommandBuffer commandBuffer)
    12240 {
    12241  const size_t blockCount = m_Blocks.size();
    12242 
    12243  pDefragCtx->blockContexts.resize(blockCount);
    12244  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
    12245 
    12246  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
    12247  const size_t moveCount = moves.size();
    12248  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12249  {
    12250  const VmaDefragmentationMove& move = moves[moveIndex];
    12251  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12252  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
    12253  }
    12254 
    12255  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
    12256 
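    // GPU strategy: bind one temporary transfer buffer to the whole VkDeviceMemory
    // of every block that participates in a move. Because each buffer is bound at
    // memory offset 0 and spans the entire block, suballocation offsets can be used
    // directly as buffer offsets in vkCmdCopyBuffer below.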
    12257  // Go over all blocks. Create and bind buffer for whole block if necessary.
    12258  {
    12259  VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    12260  bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
    12261  VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    12262 
    12263  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
    12264  {
    12265  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
    12266  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12267  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
    12268  {
    12269  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
    12270  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
    12271  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
    12272  if(pDefragCtx->res == VK_SUCCESS)
    12273  {
    12274  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
    12275  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
    12276  }
    12277  }
    12278  }
    12279  }
    12280 
    12281  // Go over all moves. Post data transfer commands to command buffer.
    12282  if(pDefragCtx->res == VK_SUCCESS)
    12283  {
    12284  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    12285  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    12286 
    12287  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
    12288  {
    12289  const VmaDefragmentationMove& move = moves[moveIndex];
    12290 
    12291  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
    12292  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
    12293 
    12294  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
    12295 
    12296  VkBufferCopy region = {
    12297  move.srcOffset,
    12298  move.dstOffset,
    12299  move.size };
    12300  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
    12301  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
    12302  }
    12303  }
    12304 
    12305  // Buffers stay in blockContexts for later destruction. Mark the context VK_NOT_READY: copy commands are recorded but not yet executed.
    12306  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
    12307  {
    12308  pDefragCtx->res = VK_NOT_READY;
    12309  }
    12310 }
    12311 
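    // Frees the device memory of all empty blocks above m_MinBlockCount, updating
    // defragmentation statistics. If an empty block must be kept to honor the
    // minimum block count, the m_HasEmptyBlock flag is set instead.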
    12312 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
    12313 {
    12314  m_HasEmptyBlock = false;
    12315  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
    12316  {
    12317  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
    12318  if(pBlock->m_pMetadata->IsEmpty())
    12319  {
    12320  if(m_Blocks.size() > m_MinBlockCount)
    12321  {
    12322  if(pDefragmentationStats != VMA_NULL)
    12323  {
    12324  ++pDefragmentationStats->deviceMemoryBlocksFreed;
    12325  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
    12326  }
    12327 
    12328  VmaVectorRemove(m_Blocks, blockIndex);
    12329  pBlock->Destroy(m_hAllocator);
    12330  vma_delete(m_hAllocator, pBlock);
    12331  }
    12332  else
    12333  {
    12334  m_HasEmptyBlock = true;
    12335  }
    12336  }
    12337  }
    12338 }
    12339 
    12340 #if VMA_STATS_STRING_ENABLED
    12341 
    12342 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
    12343 {
    12344  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12345 
    12346  json.BeginObject();
    12347 
    12348  if(m_IsCustomPool)
    12349  {
    12350  json.WriteString("MemoryTypeIndex");
    12351  json.WriteNumber(m_MemoryTypeIndex);
    12352 
    12353  json.WriteString("BlockSize");
    12354  json.WriteNumber(m_PreferredBlockSize);
    12355 
    12356  json.WriteString("BlockCount");
    12357  json.BeginObject(true);
    12358  if(m_MinBlockCount > 0)
    12359  {
    12360  json.WriteString("Min");
    12361  json.WriteNumber((uint64_t)m_MinBlockCount);
    12362  }
    12363  if(m_MaxBlockCount < SIZE_MAX)
    12364  {
    12365  json.WriteString("Max");
    12366  json.WriteNumber((uint64_t)m_MaxBlockCount);
    12367  }
    12368  json.WriteString("Cur");
    12369  json.WriteNumber((uint64_t)m_Blocks.size());
    12370  json.EndObject();
    12371 
    12372  if(m_FrameInUseCount > 0)
    12373  {
    12374  json.WriteString("FrameInUseCount");
    12375  json.WriteNumber(m_FrameInUseCount);
    12376  }
    12377 
    12378  if(m_Algorithm != 0)
    12379  {
    12380  json.WriteString("Algorithm");
    12381  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
    12382  }
    12383  }
    12384  else
    12385  {
    12386  json.WriteString("PreferredBlockSize");
    12387  json.WriteNumber(m_PreferredBlockSize);
    12388  }
    12389 
    12390  json.WriteString("Blocks");
    12391  json.BeginObject();
    12392  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12393  {
    12394  json.BeginString();
    12395  json.ContinueString(m_Blocks[i]->GetId());
    12396  json.EndString();
    12397 
    12398  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
    12399  }
    12400  json.EndObject();
    12401 
    12402  json.EndObject();
    12403 }
    12404 
    12405 #endif // #if VMA_STATS_STRING_ENABLED
    12406 
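    // Chooses between CPU and GPU defragmentation for this block vector:
    // - CPU requires the memory type to be HOST_VISIBLE and a nonzero CPU budget.
    // - GPU requires a nonzero GPU budget and is excluded when corruption detection
    //   is enabled.
    // When both are possible, GPU is preferred for DEVICE_LOCAL memory and on
    // integrated GPUs. Only CPU moves may overlap: the CPU path copies with
    // memmove, while vkCmdCopyBuffer regions must not overlap.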
    12407 void VmaBlockVector::Defragment(
    12408  class VmaBlockVectorDefragmentationContext* pCtx,
    12409  VmaDefragmentationStats* pStats,
    12410  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
    12411  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
    12412  VkCommandBuffer commandBuffer)
    12413 {
    12414  pCtx->res = VK_SUCCESS;
    12415 
    12416  const VkMemoryPropertyFlags memPropFlags =
    12417  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
    12418  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
    12419  const bool isHostCoherent = (memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0;
    12420 
    12421  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
    12422  isHostVisible;
    12423  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
    12424  !IsCorruptionDetectionEnabled();
    12425 
    12426  // There are options to defragment this memory type.
    12427  if(canDefragmentOnCpu || canDefragmentOnGpu)
    12428  {
    12429  bool defragmentOnGpu;
    12430  // There is only one option to defragment this memory type.
    12431  if(canDefragmentOnGpu != canDefragmentOnCpu)
    12432  {
    12433  defragmentOnGpu = canDefragmentOnGpu;
    12434  }
    12435  // Both options are available: Heuristics to choose the best one.
    12436  else
    12437  {
    12438  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
    12439  m_hAllocator->IsIntegratedGpu();
    12440  }
    12441 
    12442  bool overlappingMoveSupported = !defragmentOnGpu;
    12443 
    12444  if(m_hAllocator->m_UseMutex)
    12445  {
    12446  m_Mutex.LockWrite();
    12447  pCtx->mutexLocked = true;
    12448  }
    12449 
    12450  pCtx->Begin(overlappingMoveSupported);
    12451 
    12452  // Defragment.
    12453 
    12454  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
    12455  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
    12456  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > moves =
    12457  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >(VmaStlAllocator<VmaDefragmentationMove>(m_hAllocator->GetAllocationCallbacks()));
    12458  pCtx->res = pCtx->GetAlgorithm()->Defragment(moves, maxBytesToMove, maxAllocationsToMove);
    12459 
    12460  // Accumulate statistics.
    12461  if(pStats != VMA_NULL)
    12462  {
    12463  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
    12464  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
    12465  pStats->bytesMoved += bytesMoved;
    12466  pStats->allocationsMoved += allocationsMoved;
    12467  VMA_ASSERT(bytesMoved <= maxBytesToMove);
    12468  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
    12469  if(defragmentOnGpu)
    12470  {
    12471  maxGpuBytesToMove -= bytesMoved;
    12472  maxGpuAllocationsToMove -= allocationsMoved;
    12473  }
    12474  else
    12475  {
    12476  maxCpuBytesToMove -= bytesMoved;
    12477  maxCpuAllocationsToMove -= allocationsMoved;
    12478  }
    12479  }
    12480 
    12481  if(pCtx->res >= VK_SUCCESS)
    12482  {
    12483  if(defragmentOnGpu)
    12484  {
    12485  ApplyDefragmentationMovesGpu(pCtx, moves, commandBuffer);
    12486  }
    12487  else
    12488  {
    12489  ApplyDefragmentationMovesCpu(pCtx, moves);
    12490  }
    12491  }
    12492  }
    12493 }
    12494 
    12495 void VmaBlockVector::DefragmentationEnd(
    12496  class VmaBlockVectorDefragmentationContext* pCtx,
    12497  VmaDefragmentationStats* pStats)
    12498 {
    12499  // Destroy buffers.
    12500  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--; )
    12501  {
    12502  VmaBlockDefragmentationContext& blockCtx = pCtx->blockContexts[blockIndex];
    12503  if(blockCtx.hBuffer)
    12504  {
    12505  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(
    12506  m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
    12507  }
    12508  }
    12509 
    12510  if(pCtx->res >= VK_SUCCESS)
    12511  {
    12512  FreeEmptyBlocks(pStats);
    12513  }
    12514 
    12515  if(pCtx->mutexLocked)
    12516  {
    12517  VMA_ASSERT(m_hAllocator->m_UseMutex);
    12518  m_Mutex.UnlockWrite();
    12519  }
    12520 }
    12521 
    12522 size_t VmaBlockVector::CalcAllocationCount() const
    12523 {
    12524  size_t result = 0;
    12525  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12526  {
    12527  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
    12528  }
    12529  return result;
    12530 }
    12531 
    12532 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
    12533 {
    12534  if(m_BufferImageGranularity == 1)
    12535  {
    12536  return false;
    12537  }
    12538  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
    12539  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
    12540  {
    12541  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
    12542  VMA_ASSERT(m_Algorithm == 0);
    12543  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
    12544  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
    12545  {
    12546  return true;
    12547  }
    12548  }
    12549  return false;
    12550 }
    12551 
    12552 void VmaBlockVector::MakePoolAllocationsLost(
    12553  uint32_t currentFrameIndex,
    12554  size_t* pLostAllocationCount)
    12555 {
    12556  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
    12557  size_t lostAllocationCount = 0;
    12558  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12559  {
    12560  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12561  VMA_ASSERT(pBlock);
    12562  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
    12563  }
    12564  if(pLostAllocationCount != VMA_NULL)
    12565  {
    12566  *pLostAllocationCount = lostAllocationCount;
    12567  }
    12568 }
    12569 
    12570 VkResult VmaBlockVector::CheckCorruption()
    12571 {
    12572  if(!IsCorruptionDetectionEnabled())
    12573  {
    12574  return VK_ERROR_FEATURE_NOT_PRESENT;
    12575  }
    12576 
    12577  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12578  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12579  {
    12580  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12581  VMA_ASSERT(pBlock);
    12582  VkResult res = pBlock->CheckCorruption(m_hAllocator);
    12583  if(res != VK_SUCCESS)
    12584  {
    12585  return res;
    12586  }
    12587  }
    12588  return VK_SUCCESS;
    12589 }
    12590 
    12591 void VmaBlockVector::AddStats(VmaStats* pStats)
    12592 {
    12593  const uint32_t memTypeIndex = m_MemoryTypeIndex;
    12594  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    12595 
    12596  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
    12597 
    12598  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
    12599  {
    12600  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
    12601  VMA_ASSERT(pBlock);
    12602  VMA_HEAVY_ASSERT(pBlock->Validate());
    12603  VmaStatInfo allocationStatInfo;
    12604  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
    12605  VmaAddStatInfo(pStats->total, allocationStatInfo);
    12606  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    12607  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    12608  }
    12609 }
    12610 
    12611 ////////////////////////////////////////////////////////////////////////////////
    12612 // VmaDefragmentationAlgorithm_Generic members definition
    12613 
    12614 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
    12615  VmaAllocator hAllocator,
    12616  VmaBlockVector* pBlockVector,
    12617  uint32_t currentFrameIndex,
    12618  bool overlappingMoveSupported) :
    12619  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12620  m_AllocationCount(0),
    12621  m_AllAllocations(false),
    12622  m_BytesMoved(0),
    12623  m_AllocationsMoved(0),
    12624  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
    12625 {
    12626  // Create block info for each block.
    12627  const size_t blockCount = m_pBlockVector->m_Blocks.size();
    12628  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12629  {
    12630  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
    12631  pBlockInfo->m_OriginalBlockIndex = blockIndex;
    12632  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
    12633  m_Blocks.push_back(pBlockInfo);
    12634  }
    12635 
    12636  // Sort them by m_pBlock pointer value.
    12637  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
    12638 }
    12639 
    12640 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
    12641 {
    12642  for(size_t i = m_Blocks.size(); i--; )
    12643  {
    12644  vma_delete(m_hAllocator, m_Blocks[i]);
    12645  }
    12646 }
    12647 
    12648 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    12649 {
    12650  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check that this allocation was not lost.
    12651  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
    12652  {
    12653  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
    12654  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
    12655  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
    12656  {
    12657  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
    12658  (*it)->m_Allocations.push_back(allocInfo);
    12659  }
    12660  else
    12661  {
    12662  VMA_ASSERT(0);
    12663  }
    12664 
    12665  ++m_AllocationCount;
    12666  }
    12667 }
    12668 
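    // One round of the generic algorithm: walk allocations from the most "source"
    // block (last) toward the most "destination" block (first) and, for each, try
    // to re-place it at the lowest acceptable block/offset found by scanning the
    // preceding blocks. Returns early once the byte or allocation budget would be
    // exceeded; unprocessed allocations remain queued for the next round.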
    12669 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
    12670  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12671  VkDeviceSize maxBytesToMove,
    12672  uint32_t maxAllocationsToMove)
    12673 {
    12674  if(m_Blocks.empty())
    12675  {
    12676  return VK_SUCCESS;
    12677  }
    12678 
    12679  // This is a choice based on research.
    12680  // Option 1:
    12681  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
    12682  // Option 2:
    12683  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
    12684  // Option 3:
    12685  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
    12686 
    12687  size_t srcBlockMinIndex = 0;
    12688  // With FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
    12689  /*
    12690  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
    12691  {
    12692  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
    12693  if(blocksWithNonMovableCount > 0)
    12694  {
    12695  srcBlockMinIndex = blocksWithNonMovableCount - 1;
    12696  }
    12697  }
    12698  */
    12699 
    12700  size_t srcBlockIndex = m_Blocks.size() - 1;
    12701  size_t srcAllocIndex = SIZE_MAX;
    12702  for(;;)
    12703  {
    12704  // 1. Find next allocation to move.
    12705  // 1.1. Iterate m_Blocks from last to first - they are sorted from most "destination" to most "source".
    12706  // 1.2. Then start from last to first m_Allocations.
    12707  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
    12708  {
    12709  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
    12710  {
    12711  // Finished: no more allocations to process.
    12712  if(srcBlockIndex == srcBlockMinIndex)
    12713  {
    12714  return VK_SUCCESS;
    12715  }
    12716  else
    12717  {
    12718  --srcBlockIndex;
    12719  srcAllocIndex = SIZE_MAX;
    12720  }
    12721  }
    12722  else
    12723  {
    12724  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
    12725  }
    12726  }
    12727 
    12728  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
    12729  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
    12730 
    12731  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
    12732  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
    12733  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
    12734  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
    12735 
    12736  // 2. Try to find new place for this allocation in preceding or current block.
    12737  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
    12738  {
    12739  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
    12740  VmaAllocationRequest dstAllocRequest;
    12741  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
    12742  m_CurrentFrameIndex,
    12743  m_pBlockVector->GetFrameInUseCount(),
    12744  m_pBlockVector->GetBufferImageGranularity(),
    12745  size,
    12746  alignment,
    12747  false, // upperAddress
    12748  suballocType,
    12749  false, // canMakeOtherLost
    12750  strategy,
    12751  &dstAllocRequest) &&
    12752  MoveMakesSense(
    12753  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
    12754  {
    12755  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
    12756 
    12757  // Reached limit on number of allocations or bytes to move.
    12758  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
    12759  (m_BytesMoved + size > maxBytesToMove))
    12760  {
    12761  return VK_SUCCESS;
    12762  }
    12763 
    12764  VmaDefragmentationMove move;
    12765  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
    12766  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
    12767  move.srcOffset = srcOffset;
    12768  move.dstOffset = dstAllocRequest.offset;
    12769  move.size = size;
    12770  moves.push_back(move);
    12771 
    12772  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
    12773  dstAllocRequest,
    12774  suballocType,
    12775  size,
    12776  allocInfo.m_hAllocation);
    12777  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
    12778 
    12779  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
    12780 
    12781  if(allocInfo.m_pChanged != VMA_NULL)
    12782  {
    12783  *allocInfo.m_pChanged = VK_TRUE;
    12784  }
    12785 
    12786  ++m_AllocationsMoved;
    12787  m_BytesMoved += size;
    12788 
    12789  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
    12790 
    12791  break;
    12792  }
    12793  }
    12794 
    12795  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
    12796 
    12797  if(srcAllocIndex > 0)
    12798  {
    12799  --srcAllocIndex;
    12800  }
    12801  else
    12802  {
    12803  if(srcBlockIndex > 0)
    12804  {
    12805  --srcBlockIndex;
    12806  srcAllocIndex = SIZE_MAX;
    12807  }
    12808  else
    12809  {
    12810  return VK_SUCCESS;
    12811  }
    12812  }
    12813  }
    12814 }
    12815 
    12816 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
    12817 {
    12818  size_t result = 0;
    12819  for(size_t i = 0; i < m_Blocks.size(); ++i)
    12820  {
    12821  if(m_Blocks[i]->m_HasNonMovableAllocations)
    12822  {
    12823  ++result;
    12824  }
    12825  }
    12826  return result;
    12827 }
    12828 
    12829 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
    12830  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12831  VkDeviceSize maxBytesToMove,
    12832  uint32_t maxAllocationsToMove)
    12833 {
    12834  if(!m_AllAllocations && m_AllocationCount == 0)
    12835  {
    12836  return VK_SUCCESS;
    12837  }
    12838 
    12839  const size_t blockCount = m_Blocks.size();
    12840  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    12841  {
    12842  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
    12843 
    12844  if(m_AllAllocations)
    12845  {
    12846  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
    12847  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
    12848  it != pMetadata->m_Suballocations.end();
    12849  ++it)
    12850  {
    12851  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
    12852  {
    12853  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
    12854  pBlockInfo->m_Allocations.push_back(allocInfo);
    12855  }
    12856  }
    12857  }
    12858 
    12859  pBlockInfo->CalcHasNonMovableAllocations();
    12860 
    12861  // This is a choice based on research.
    12862  // Option 1:
    12863  pBlockInfo->SortAllocationsByOffsetDescending();
    12864  // Option 2:
    12865  //pBlockInfo->SortAllocationsBySizeDescending();
    12866  }
    12867 
    12868  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
    12869  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
    12870 
    12871  // This is a choice based on research.
    12872  const uint32_t roundCount = 2;
    12873 
    12874  // Execute defragmentation rounds (the main part).
    12875  VkResult result = VK_SUCCESS;
    12876  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
    12877  {
    12878  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove);
    12879  }
    12880 
    12881  return result;
    12882 }
    12883 
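    // A move is worthwhile only if it strictly lowers the (blockIndex, offset) pair
    // in lexicographic order - i.e. it packs the allocation into an earlier block,
    // or to a lower offset within the same block.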
    12884 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
    12885  size_t dstBlockIndex, VkDeviceSize dstOffset,
    12886  size_t srcBlockIndex, VkDeviceSize srcOffset)
    12887 {
    12888  if(dstBlockIndex < srcBlockIndex)
    12889  {
    12890  return true;
    12891  }
    12892  if(dstBlockIndex > srcBlockIndex)
    12893  {
    12894  return false;
    12895  }
    12896  if(dstOffset < srcOffset)
    12897  {
    12898  return true;
    12899  }
    12900  return false;
    12901 }
    12902 
    12903 ////////////////////////////////////////////////////////////////////////////////
    12904 // VmaDefragmentationAlgorithm_Fast
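    // Single-pass compaction: with every allocation movable, a zero debug margin,
    // and no buffer/image granularity conflicts possible, suballocations can be
    // slid toward the front of the "destination" blocks in one sweep, editing the
    // block metadata directly instead of issuing per-allocation alloc/free requests.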
    12905 
    12906 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
    12907  VmaAllocator hAllocator,
    12908  VmaBlockVector* pBlockVector,
    12909  uint32_t currentFrameIndex,
    12910  bool overlappingMoveSupported) :
    12911  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
    12912  m_OverlappingMoveSupported(overlappingMoveSupported),
    12913  m_AllocationCount(0),
    12914  m_AllAllocations(false),
    12915  m_BytesMoved(0),
    12916  m_AllocationsMoved(0),
    12917  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
    12918 {
    12919  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
    12920 
    12921 }
    12922 
    12923 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
    12924 {
    12925 }
    12926 
    12927 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
    12928  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
    12929  VkDeviceSize maxBytesToMove,
    12930  uint32_t maxAllocationsToMove)
    12931 {
    12932  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
    12933 
    12934  const size_t blockCount = m_pBlockVector->GetBlockCount();
    12935  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
    12936  {
    12937  return VK_SUCCESS;
    12938  }
    12939 
    12940  PreprocessMetadata();
    12941 
    12942  // Sort blocks in order from most "destination" (least free space) to most "source" (most free space).
    12943 
    12944  m_BlockInfos.resize(blockCount);
    12945  for(size_t i = 0; i < blockCount; ++i)
    12946  {
    12947  m_BlockInfos[i].origBlockIndex = i;
    12948  }
    12949 
    12950  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
    12951  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
    12952  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
    12953  });
    12954 
    12955  // THE MAIN ALGORITHM
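    // Invariants of the sweep: dstBlockInfoIndex never exceeds srcBlockInfoIndex and
    // dstOffset only grows, so data is always moved to an equal or earlier block and
    // never past its source. Free ranges that get skipped over (when a move would
    // barely shift an allocation, or at a block's tail) are remembered in
    // freeSpaceDb and offered first to subsequent allocations.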
    12956 
    12957  FreeSpaceDatabase freeSpaceDb;
    12958 
    12959  size_t dstBlockInfoIndex = 0;
    12960  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    12961  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    12962  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    12963  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
    12964  VkDeviceSize dstOffset = 0;
    12965 
    12966  bool end = false;
    12967  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
    12968  {
    12969  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
    12970  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
    12971  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
    12972  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
    12973  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
    12974  {
    12975  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
    12976  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
    12977  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
    12978  if(m_AllocationsMoved == maxAllocationsToMove ||
    12979  m_BytesMoved + srcAllocSize > maxBytesToMove)
    12980  {
    12981  end = true;
    12982  break;
    12983  }
    12984  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
    12985 
    12986  // Try to place it in one of free spaces from the database.
    12987  size_t freeSpaceInfoIndex;
    12988  VkDeviceSize dstAllocOffset;
    12989  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
    12990  freeSpaceInfoIndex, dstAllocOffset))
    12991  {
    12992  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
    12993  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
    12994  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
    12995 
    12996  // Same block
    12997  if(freeSpaceInfoIndex == srcBlockInfoIndex)
    12998  {
    12999  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13000 
    13001  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13002 
    13003  VmaSuballocation suballoc = *srcSuballocIt;
    13004  suballoc.offset = dstAllocOffset;
    13005  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
    13006  m_BytesMoved += srcAllocSize;
    13007  ++m_AllocationsMoved;
    13008 
    13009  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13010  ++nextSuballocIt;
    13011  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13012  srcSuballocIt = nextSuballocIt;
    13013 
    13014  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13015 
    13016  VmaDefragmentationMove move = {
    13017  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13018  srcAllocOffset, dstAllocOffset,
    13019  srcAllocSize };
    13020  moves.push_back(move);
    13021  }
    13022  // Different block
    13023  else
    13024  {
    13025  // MOVE OPTION 2: Move the allocation to a different block.
    13026 
    13027  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
    13028 
    13029  VmaSuballocation suballoc = *srcSuballocIt;
    13030  suballoc.offset = dstAllocOffset;
    13031  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
    13032  m_BytesMoved += srcAllocSize;
    13033  ++m_AllocationsMoved;
    13034 
    13035  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13036  ++nextSuballocIt;
    13037  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13038  srcSuballocIt = nextSuballocIt;
    13039 
    13040  InsertSuballoc(pFreeSpaceMetadata, suballoc);
    13041 
    13042  VmaDefragmentationMove move = {
    13043  srcOrigBlockIndex, freeSpaceOrigBlockIndex,
    13044  srcAllocOffset, dstAllocOffset,
    13045  srcAllocSize };
    13046  moves.push_back(move);
    13047  }
    13048  }
    13049  else
    13050  {
    13051  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
    13052 
    13053  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
    13054  while(dstBlockInfoIndex < srcBlockInfoIndex &&
    13055  dstAllocOffset + srcAllocSize > dstBlockSize)
    13056  {
    13057  // But before that, register remaining free space at the end of dst block.
    13058  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
    13059 
    13060  ++dstBlockInfoIndex;
    13061  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
    13062  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
    13063  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
    13064  dstBlockSize = pDstMetadata->GetSize();
    13065  dstOffset = 0;
    13066  dstAllocOffset = 0;
    13067  }
    13068 
    13069  // Same block
    13070  if(dstBlockInfoIndex == srcBlockInfoIndex)
    13071  {
    13072  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
    13073 
    13074  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
    13075 
    13076  bool skipOver = overlap;
    13077  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
    13078  {
    13079  // If destination and source places overlap, skip the move if it would
    13080  // shift the allocation by less than 1/64 of its size.
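    // E.g. a 64 KiB allocation is left in place unless the move would shift it
    // down by at least 1 KiB; a smaller gain is not worth the copy.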
    13081  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
    13082  }
    13083 
    13084  if(skipOver)
    13085  {
    13086  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
    13087 
    13088  dstOffset = srcAllocOffset + srcAllocSize;
    13089  ++srcSuballocIt;
    13090  }
    13091  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
    13092  else
    13093  {
    13094  srcSuballocIt->offset = dstAllocOffset;
    13095  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
    13096  dstOffset = dstAllocOffset + srcAllocSize;
    13097  m_BytesMoved += srcAllocSize;
    13098  ++m_AllocationsMoved;
    13099  ++srcSuballocIt;
    13100  VmaDefragmentationMove move = {
    13101  srcOrigBlockIndex, dstOrigBlockIndex,
    13102  srcAllocOffset, dstAllocOffset,
    13103  srcAllocSize };
    13104  moves.push_back(move);
    13105  }
    13106  }
    13107  // Different block
    13108  else
    13109  {
    13110  // MOVE OPTION 2: Move the allocation to a different block.
    13111 
    13112  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
    13113  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
    13114 
    13115  VmaSuballocation suballoc = *srcSuballocIt;
    13116  suballoc.offset = dstAllocOffset;
    13117  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
    13118  dstOffset = dstAllocOffset + srcAllocSize;
    13119  m_BytesMoved += srcAllocSize;
    13120  ++m_AllocationsMoved;
    13121 
    13122  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
    13123  ++nextSuballocIt;
    13124  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
    13125  srcSuballocIt = nextSuballocIt;
    13126 
    13127  pDstMetadata->m_Suballocations.push_back(suballoc);
    13128 
    13129  VmaDefragmentationMove move = {
    13130  srcOrigBlockIndex, dstOrigBlockIndex,
    13131  srcAllocOffset, dstAllocOffset,
    13132  srcAllocSize };
    13133  moves.push_back(move);
    13134  }
    13135  }
    13136  }
    13137  }
    13138 
    13139  m_BlockInfos.clear();
    13140 
    13141  PostprocessMetadata();
    13142 
    13143  return VK_SUCCESS;
    13144 }
    13145 
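    // Temporarily strips every FREE suballocation from each block's metadata,
    // leaving only the used ones, and resets the free-list bookkeeping, so the
    // compaction sweep in Defragment() can treat the metadata as a plain ordered
    // list of allocations.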
    13146 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
    13147 {
    13148  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13149  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13150  {
    13151  VmaBlockMetadata_Generic* const pMetadata =
    13152  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13153  pMetadata->m_FreeCount = 0;
    13154  pMetadata->m_SumFreeSize = pMetadata->GetSize();
    13155  pMetadata->m_FreeSuballocationsBySize.clear();
    13156  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13157  it != pMetadata->m_Suballocations.end(); )
    13158  {
    13159  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
    13160  {
    13161  VmaSuballocationList::iterator nextIt = it;
    13162  ++nextIt;
    13163  pMetadata->m_Suballocations.erase(it);
    13164  it = nextIt;
    13165  }
    13166  else
    13167  {
    13168  ++it;
    13169  }
    13170  }
    13171  }
    13172 }
    13173 
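    // Rebuilds consistent metadata after the sweep: walks the used suballocations
    // in offset order, re-inserts a FREE suballocation into every gap and after the
    // last allocation, recomputes m_FreeCount/m_SumFreeSize, and re-sorts the
    // by-size free list used to serve allocation requests.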
    13174 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
    13175 {
    13176  const size_t blockCount = m_pBlockVector->GetBlockCount();
    13177  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
    13178  {
    13179  VmaBlockMetadata_Generic* const pMetadata =
    13180  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
    13181  const VkDeviceSize blockSize = pMetadata->GetSize();
    13182 
    13183  // No allocations in this block - entire area is free.
    13184  if(pMetadata->m_Suballocations.empty())
    13185  {
    13186  pMetadata->m_FreeCount = 1;
    13187  //pMetadata->m_SumFreeSize is already set to blockSize.
    13188  VmaSuballocation suballoc = {
    13189  0, // offset
    13190  blockSize, // size
    13191  VMA_NULL, // hAllocation
    13192  VMA_SUBALLOCATION_TYPE_FREE };
    13193  pMetadata->m_Suballocations.push_back(suballoc);
    13194  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
    13195  }
    13196  // There are some allocations in this block.
    13197  else
    13198  {
    13199  VkDeviceSize offset = 0;
    13200  VmaSuballocationList::iterator it;
    13201  for(it = pMetadata->m_Suballocations.begin();
    13202  it != pMetadata->m_Suballocations.end();
    13203  ++it)
    13204  {
    13205  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
    13206  VMA_ASSERT(it->offset >= offset);
    13207 
    13208  // Need to insert preceding free space.
    13209  if(it->offset > offset)
    13210  {
    13211  ++pMetadata->m_FreeCount;
    13212  const VkDeviceSize freeSize = it->offset - offset;
    13213  VmaSuballocation suballoc = {
    13214  offset, // offset
    13215  freeSize, // size
    13216  VMA_NULL, // hAllocation
    13217  VMA_SUBALLOCATION_TYPE_FREE };
    13218  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13219  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13220  {
    13221  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
    13222  }
    13223  }
    13224 
    13225  pMetadata->m_SumFreeSize -= it->size;
    13226  offset = it->offset + it->size;
    13227  }
    13228 
    13229  // Need to insert trailing free space.
    13230  if(offset < blockSize)
    13231  {
    13232  ++pMetadata->m_FreeCount;
    13233  const VkDeviceSize freeSize = blockSize - offset;
    13234  VmaSuballocation suballoc = {
    13235  offset, // offset
    13236  freeSize, // size
    13237  VMA_NULL, // hAllocation
    13238  VMA_SUBALLOCATION_TYPE_FREE };
    13239  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
    13240  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
    13241  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
    13242  {
    13243  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
    13244  }
    13245  }
    13246 
    13247  VMA_SORT(
    13248  pMetadata->m_FreeSuballocationsBySize.begin(),
    13249  pMetadata->m_FreeSuballocationsBySize.end(),
    13250  VmaSuballocationItemSizeLess());
    13251  }
    13252 
    13253  VMA_HEAVY_ASSERT(pMetadata->Validate());
    13254  }
    13255 }
    13256 
    13257 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
    13258 {
    13259  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
    13260  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
    13261  while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
    13262  {
    13263  ++it;
    13264  }
    13268  pMetadata->m_Suballocations.insert(it, suballoc);
    13269 }
    13270 
    13271 ////////////////////////////////////////////////////////////////////////////////
    13272 // VmaBlockVectorDefragmentationContext
    13273 
    13274 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
    13275  VmaAllocator hAllocator,
    13276  VmaPool hCustomPool,
    13277  VmaBlockVector* pBlockVector,
    13278  uint32_t currFrameIndex,
    13279  uint32_t algorithmFlags) :
    13280  res(VK_SUCCESS),
    13281  mutexLocked(false),
    13282  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
    13283  m_hAllocator(hAllocator),
    13284  m_hCustomPool(hCustomPool),
    13285  m_pBlockVector(pBlockVector),
    13286  m_CurrFrameIndex(currFrameIndex),
    13287  m_AlgorithmFlags(algorithmFlags),
    13288  m_pAlgorithm(VMA_NULL),
    13289  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
    13290  m_AllAllocations(false)
    13291 {
    13292 }
    13293 
    13294 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
    13295 {
    13296  vma_delete(m_hAllocator, m_pAlgorithm);
    13297 }
    13298 
    13299 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
    13300 {
    13301  AllocInfo info = { hAlloc, pChanged };
    13302  m_Allocations.push_back(info);
    13303 }
    13304 
    13305 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported)
    13306 {
    13307  const bool allAllocations = m_AllAllocations ||
    13308  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
    13309 
    13310  /********************************
    13311  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
    13312  ********************************/
    13313 
    13314  /*
    13315  Fast algorithm is supported only when certain criteria are met:
    13316  - VMA_DEBUG_MARGIN is 0.
    13317  - All allocations in this block vector are movable.
    13318  - There is no possibility of image/buffer granularity conflict.
    13319  */
    13320  if(VMA_DEBUG_MARGIN == 0 &&
    13321  allAllocations &&
    13322  !m_pBlockVector->IsBufferImageGranularityConflictPossible())
    13323  {
    13324  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
    13325  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13326  }
    13327  else
    13328  {
    13329  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
    13330  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
    13331  }
    13332 
    13333  if(allAllocations)
    13334  {
    13335  m_pAlgorithm->AddAll();
    13336  }
    13337  else
    13338  {
    13339  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
    13340  {
    13341  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
    13342  }
    13343  }
    13344 }
    13345 
    13346 ////////////////////////////////////////////////////////////////////////////////
    13347 // VmaDefragmentationContext
    13348 
    13349 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
    13350  VmaAllocator hAllocator,
    13351  uint32_t currFrameIndex,
    13352  uint32_t flags,
    13353  VmaDefragmentationStats* pStats) :
    13354  m_hAllocator(hAllocator),
    13355  m_CurrFrameIndex(currFrameIndex),
    13356  m_Flags(flags),
    13357  m_pStats(pStats),
    13358  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
    13359 {
    13360  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
    13361 }
    13362 
    13363 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
    13364 {
    13365  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13366  {
    13367  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
    13368  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13369  vma_delete(m_hAllocator, pBlockVectorCtx);
    13370  }
    13371  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
    13372  {
    13373  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
    13374  if(pBlockVectorCtx)
    13375  {
    13376  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_pStats);
    13377  vma_delete(m_hAllocator, pBlockVectorCtx);
    13378  }
    13379  }
    13380 }
    13381 
    13382 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, VmaPool* pPools)
    13383 {
    13384  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    13385  {
    13386  VmaPool pool = pPools[poolIndex];
    13387  VMA_ASSERT(pool);
    13388  // Pools with algorithm other than default are not defragmented.
    13389  if(pool->m_BlockVector.GetAlgorithm() == 0)
    13390  {
    13391  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13392 
    13393  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13394  {
    13395  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
    13396  {
    13397  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13398  break;
    13399  }
    13400  }
    13401 
    13402  if(!pBlockVectorDefragCtx)
    13403  {
    13404  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13405  m_hAllocator,
    13406  pool,
    13407  &pool->m_BlockVector,
    13408  m_CurrFrameIndex,
    13409  m_Flags);
    13410  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13411  }
    13412 
    13413  pBlockVectorDefragCtx->AddAll();
    13414  }
    13415  }
    13416 }
    13417 
    13418 void VmaDefragmentationContext_T::AddAllocations(
    13419  uint32_t allocationCount,
    13420  VmaAllocation* pAllocations,
    13421  VkBool32* pAllocationsChanged)
    13422 {
    13423  // Dispatch pAllocations among per-pool defragmentation contexts. Create them when necessary.
    13424  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    13425  {
    13426  const VmaAllocation hAlloc = pAllocations[allocIndex];
    13427  VMA_ASSERT(hAlloc);
    13428  // DedicatedAlloc cannot be defragmented.
    13429  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
    13430  // Lost allocation cannot be defragmented.
    13431  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
    13432  {
    13433  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
    13434 
    13435  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
    13436  // This allocation belongs to custom pool.
    13437  if(hAllocPool != VK_NULL_HANDLE)
    13438  {
    13439  // Pools with algorithm other than default are not defragmented.
    13440  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
    13441  {
    13442  for(size_t i = m_CustomPoolContexts.size(); i--; )
    13443  {
    13444  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
    13445  {
    13446  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
    13447  break;
    13448  }
    13449  }
    13450  if(!pBlockVectorDefragCtx)
    13451  {
    13452  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13453  m_hAllocator,
    13454  hAllocPool,
    13455  &hAllocPool->m_BlockVector,
    13456  m_CurrFrameIndex,
    13457  m_Flags);
    13458  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
    13459  }
    13460  }
    13461  }
    13462  // This allocation belongs to default pool.
    13463  else
    13464  {
    13465  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
    13466  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
    13467  if(!pBlockVectorDefragCtx)
    13468  {
    13469  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
    13470  m_hAllocator,
    13471  VMA_NULL, // hCustomPool
    13472  m_hAllocator->m_pBlockVectors[memTypeIndex],
    13473  m_CurrFrameIndex,
    13474  m_Flags);
    13475  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
    13476  }
    13477  }
    13478 
    13479  if(pBlockVectorDefragCtx)
    13480  {
    13481  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
    13482  &pAllocationsChanged[allocIndex] : VMA_NULL;
    13483  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
    13484  }
    13485  }
    13486  }
    13487 }
    13488 
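    // Runs defragmentation over all registered default-pool and custom-pool
    // contexts. Without a command buffer the GPU budgets are zeroed, limiting work
    // to the CPU path. A result of VK_NOT_READY (set when copy commands were
    // recorded) is positive, so processing continues; the caller must then submit
    // the command buffer and finish with vmaDefragmentationEnd.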
    13489 VkResult VmaDefragmentationContext_T::Defragment(
    13490  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
    13491  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
    13492  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats)
    13493 {
    13494  if(pStats)
    13495  {
    13496  memset(pStats, 0, sizeof(VmaDefragmentationStats));
    13497  }
    13498 
    13499  if(commandBuffer == VK_NULL_HANDLE)
    13500  {
    13501  maxGpuBytesToMove = 0;
    13502  maxGpuAllocationsToMove = 0;
    13503  }
    13504 
    13505  VkResult res = VK_SUCCESS;
    13506 
    13507  // Process default pools.
    13508  for(uint32_t memTypeIndex = 0;
    13509  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
    13510  ++memTypeIndex)
    13511  {
    13512  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
    13513  if(pBlockVectorCtx)
    13514  {
    13515  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
    13516  pBlockVectorCtx->GetBlockVector()->Defragment(
    13517  pBlockVectorCtx,
    13518  pStats,
    13519  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13520  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13521  commandBuffer);
    13522  if(pBlockVectorCtx->res != VK_SUCCESS)
    13523  {
    13524  res = pBlockVectorCtx->res;
    13525  }
    13526  }
    13527  }
    13528 
    13529  // Process custom pools.
    13530  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
    13531  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
    13532  ++customCtxIndex)
    13533  {
    13534  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
    13535  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
    13536  pBlockVectorCtx->GetBlockVector()->Defragment(
    13537  pBlockVectorCtx,
    13538  pStats,
    13539  maxCpuBytesToMove, maxCpuAllocationsToMove,
    13540  maxGpuBytesToMove, maxGpuAllocationsToMove,
    13541  commandBuffer);
    13542  if(pBlockVectorCtx->res != VK_SUCCESS)
    13543  {
    13544  res = pBlockVectorCtx->res;
    13545  }
    13546  }
    13547 
    13548  return res;
    13549 }
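/* Editorial usage sketch -- not part of the original source. Defragment() above
is normally driven through the public vmaDefragmentationBegin()/
vmaDefragmentationEnd() pair. A minimal CPU-only pass could look like the
following; `allocs` and `changed` are hypothetical caller-side arrays:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocs.size();
    defragInfo.pAllocations = allocs.data();
    defragInfo.pAllocationsChanged = changed.data(); // VK_TRUE where an allocation moved
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    // commandBuffer stays VK_NULL_HANDLE, so Defragment() zeroes the GPU limits.

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to allocations with changed[i] == VK_TRUE must be
    // recreated and rebound by the caller afterwards.
*/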
    13550 
13551 ////////////////////////////////////////////////////////////////////////////////
13552 // VmaRecorder
    13553 
    13554 #if VMA_RECORDING_ENABLED
    13555 
    13556 VmaRecorder::VmaRecorder() :
    13557  m_UseMutex(true),
    13558  m_Flags(0),
    13559  m_File(VMA_NULL),
    13560  m_Freq(INT64_MAX),
    13561  m_StartCounter(INT64_MAX)
    13562 {
    13563 }
    13564 
    13565 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
    13566 {
    13567  m_UseMutex = useMutex;
    13568  m_Flags = settings.flags;
    13569 
    13570  QueryPerformanceFrequency((LARGE_INTEGER*)&m_Freq);
    13571  QueryPerformanceCounter((LARGE_INTEGER*)&m_StartCounter);
    13572 
    13573  // Open file for writing.
    13574  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
    13575  if(err != 0)
    13576  {
    13577  return VK_ERROR_INITIALIZATION_FAILED;
    13578  }
    13579 
    13580  // Write header.
    13581  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
    13582  fprintf(m_File, "%s\n", "1,5");
    13583 
    13584  return VK_SUCCESS;
    13585 }
    13586 
    13587 VmaRecorder::~VmaRecorder()
    13588 {
    13589  if(m_File != VMA_NULL)
    13590  {
    13591  fclose(m_File);
    13592  }
    13593 }
    13594 
    13595 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
    13596 {
    13597  CallParams callParams;
    13598  GetBasicParams(callParams);
    13599 
    13600  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13601  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13602  Flush();
    13603 }
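/* Editorial note: given the header written in Init() and the format string
above, every recorded call becomes one CSV row of the form
"<threadId>,<secondsSinceInit>,<frameIndex>,<functionName>[,args...]", e.g.
(values illustrative only):

    7840,0.002,0,vmaCreateAllocator
*/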
    13604 
    13605 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
    13606 {
    13607  CallParams callParams;
    13608  GetBasicParams(callParams);
    13609 
    13610  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13611  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
    13612  Flush();
    13613 }
    13614 
    13615 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
    13616 {
    13617  CallParams callParams;
    13618  GetBasicParams(callParams);
    13619 
    13620  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13621  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
    13622  createInfo.memoryTypeIndex,
    13623  createInfo.flags,
    13624  createInfo.blockSize,
    13625  (uint64_t)createInfo.minBlockCount,
    13626  (uint64_t)createInfo.maxBlockCount,
    13627  createInfo.frameInUseCount,
    13628  pool);
    13629  Flush();
    13630 }
    13631 
    13632 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
    13633 {
    13634  CallParams callParams;
    13635  GetBasicParams(callParams);
    13636 
    13637  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13638  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
    13639  pool);
    13640  Flush();
    13641 }
    13642 
    13643 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
    13644  const VkMemoryRequirements& vkMemReq,
    13645  const VmaAllocationCreateInfo& createInfo,
    13646  VmaAllocation allocation)
    13647 {
    13648  CallParams callParams;
    13649  GetBasicParams(callParams);
    13650 
    13651  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13652  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13653  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13654  vkMemReq.size,
    13655  vkMemReq.alignment,
    13656  vkMemReq.memoryTypeBits,
    13657  createInfo.flags,
    13658  createInfo.usage,
    13659  createInfo.requiredFlags,
    13660  createInfo.preferredFlags,
    13661  createInfo.memoryTypeBits,
    13662  createInfo.pool,
    13663  allocation,
    13664  userDataStr.GetString());
    13665  Flush();
    13666 }
    13667 
    13668 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
    13669  const VkMemoryRequirements& vkMemReq,
    13670  const VmaAllocationCreateInfo& createInfo,
    13671  uint64_t allocationCount,
    13672  const VmaAllocation* pAllocations)
    13673 {
    13674  CallParams callParams;
    13675  GetBasicParams(callParams);
    13676 
    13677  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13678  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13679  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
    13680  vkMemReq.size,
    13681  vkMemReq.alignment,
    13682  vkMemReq.memoryTypeBits,
    13683  createInfo.flags,
    13684  createInfo.usage,
    13685  createInfo.requiredFlags,
    13686  createInfo.preferredFlags,
    13687  createInfo.memoryTypeBits,
    13688  createInfo.pool);
    13689  PrintPointerList(allocationCount, pAllocations);
    13690  fprintf(m_File, ",%s\n", userDataStr.GetString());
    13691  Flush();
    13692 }
    13693 
    13694 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
    13695  const VkMemoryRequirements& vkMemReq,
    13696  bool requiresDedicatedAllocation,
    13697  bool prefersDedicatedAllocation,
    13698  const VmaAllocationCreateInfo& createInfo,
    13699  VmaAllocation allocation)
    13700 {
    13701  CallParams callParams;
    13702  GetBasicParams(callParams);
    13703 
    13704  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13705  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13706  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13707  vkMemReq.size,
    13708  vkMemReq.alignment,
    13709  vkMemReq.memoryTypeBits,
    13710  requiresDedicatedAllocation ? 1 : 0,
    13711  prefersDedicatedAllocation ? 1 : 0,
    13712  createInfo.flags,
    13713  createInfo.usage,
    13714  createInfo.requiredFlags,
    13715  createInfo.preferredFlags,
    13716  createInfo.memoryTypeBits,
    13717  createInfo.pool,
    13718  allocation,
    13719  userDataStr.GetString());
    13720  Flush();
    13721 }
    13722 
    13723 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
    13724  const VkMemoryRequirements& vkMemReq,
    13725  bool requiresDedicatedAllocation,
    13726  bool prefersDedicatedAllocation,
    13727  const VmaAllocationCreateInfo& createInfo,
    13728  VmaAllocation allocation)
    13729 {
    13730  CallParams callParams;
    13731  GetBasicParams(callParams);
    13732 
    13733  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13734  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
    13735  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13736  vkMemReq.size,
    13737  vkMemReq.alignment,
    13738  vkMemReq.memoryTypeBits,
    13739  requiresDedicatedAllocation ? 1 : 0,
    13740  prefersDedicatedAllocation ? 1 : 0,
    13741  createInfo.flags,
    13742  createInfo.usage,
    13743  createInfo.requiredFlags,
    13744  createInfo.preferredFlags,
    13745  createInfo.memoryTypeBits,
    13746  createInfo.pool,
    13747  allocation,
    13748  userDataStr.GetString());
    13749  Flush();
    13750 }
    13751 
    13752 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
    13753  VmaAllocation allocation)
    13754 {
    13755  CallParams callParams;
    13756  GetBasicParams(callParams);
    13757 
    13758  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13759  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13760  allocation);
    13761  Flush();
    13762 }
    13763 
    13764 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
    13765  uint64_t allocationCount,
    13766  const VmaAllocation* pAllocations)
    13767 {
    13768  CallParams callParams;
    13769  GetBasicParams(callParams);
    13770 
    13771  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13772  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
    13773  PrintPointerList(allocationCount, pAllocations);
    13774  fprintf(m_File, "\n");
    13775  Flush();
    13776 }
    13777 
    13778 void VmaRecorder::RecordResizeAllocation(
    13779  uint32_t frameIndex,
    13780  VmaAllocation allocation,
    13781  VkDeviceSize newSize)
    13782 {
    13783  CallParams callParams;
    13784  GetBasicParams(callParams);
    13785 
    13786  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13787  fprintf(m_File, "%u,%.3f,%u,vmaResizeAllocation,%p,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13788  allocation, newSize);
    13789  Flush();
    13790 }
    13791 
    13792 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
    13793  VmaAllocation allocation,
    13794  const void* pUserData)
    13795 {
    13796  CallParams callParams;
    13797  GetBasicParams(callParams);
    13798 
    13799  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13800  UserDataString userDataStr(
    13801  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
    13802  pUserData);
    13803  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13804  allocation,
    13805  userDataStr.GetString());
    13806  Flush();
    13807 }
    13808 
    13809 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
    13810  VmaAllocation allocation)
    13811 {
    13812  CallParams callParams;
    13813  GetBasicParams(callParams);
    13814 
    13815  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13816  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13817  allocation);
    13818  Flush();
    13819 }
    13820 
    13821 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
    13822  VmaAllocation allocation)
    13823 {
    13824  CallParams callParams;
    13825  GetBasicParams(callParams);
    13826 
    13827  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13828  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13829  allocation);
    13830  Flush();
    13831 }
    13832 
    13833 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
    13834  VmaAllocation allocation)
    13835 {
    13836  CallParams callParams;
    13837  GetBasicParams(callParams);
    13838 
    13839  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13840  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
    13841  allocation);
    13842  Flush();
    13843 }
    13844 
    13845 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
    13846  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13847 {
    13848  CallParams callParams;
    13849  GetBasicParams(callParams);
    13850 
    13851  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13852  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13853  allocation,
    13854  offset,
    13855  size);
    13856  Flush();
    13857 }
    13858 
    13859 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
    13860  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    13861 {
    13862  CallParams callParams;
    13863  GetBasicParams(callParams);
    13864 
    13865  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13866  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
    13867  allocation,
    13868  offset,
    13869  size);
    13870  Flush();
    13871 }
    13872 
    13873 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
    13874  const VkBufferCreateInfo& bufCreateInfo,
    13875  const VmaAllocationCreateInfo& allocCreateInfo,
    13876  VmaAllocation allocation)
    13877 {
    13878  CallParams callParams;
    13879  GetBasicParams(callParams);
    13880 
    13881  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13882  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13883  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13884  bufCreateInfo.flags,
    13885  bufCreateInfo.size,
    13886  bufCreateInfo.usage,
    13887  bufCreateInfo.sharingMode,
    13888  allocCreateInfo.flags,
    13889  allocCreateInfo.usage,
    13890  allocCreateInfo.requiredFlags,
    13891  allocCreateInfo.preferredFlags,
    13892  allocCreateInfo.memoryTypeBits,
    13893  allocCreateInfo.pool,
    13894  allocation,
    13895  userDataStr.GetString());
    13896  Flush();
    13897 }
    13898 
    13899 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
    13900  const VkImageCreateInfo& imageCreateInfo,
    13901  const VmaAllocationCreateInfo& allocCreateInfo,
    13902  VmaAllocation allocation)
    13903 {
    13904  CallParams callParams;
    13905  GetBasicParams(callParams);
    13906 
    13907  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13908  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
    13909  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
    13910  imageCreateInfo.flags,
    13911  imageCreateInfo.imageType,
    13912  imageCreateInfo.format,
    13913  imageCreateInfo.extent.width,
    13914  imageCreateInfo.extent.height,
    13915  imageCreateInfo.extent.depth,
    13916  imageCreateInfo.mipLevels,
    13917  imageCreateInfo.arrayLayers,
    13918  imageCreateInfo.samples,
    13919  imageCreateInfo.tiling,
    13920  imageCreateInfo.usage,
    13921  imageCreateInfo.sharingMode,
    13922  imageCreateInfo.initialLayout,
    13923  allocCreateInfo.flags,
    13924  allocCreateInfo.usage,
    13925  allocCreateInfo.requiredFlags,
    13926  allocCreateInfo.preferredFlags,
    13927  allocCreateInfo.memoryTypeBits,
    13928  allocCreateInfo.pool,
    13929  allocation,
    13930  userDataStr.GetString());
    13931  Flush();
    13932 }
    13933 
    13934 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
    13935  VmaAllocation allocation)
    13936 {
    13937  CallParams callParams;
    13938  GetBasicParams(callParams);
    13939 
    13940  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13941  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
    13942  allocation);
    13943  Flush();
    13944 }
    13945 
    13946 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
    13947  VmaAllocation allocation)
    13948 {
    13949  CallParams callParams;
    13950  GetBasicParams(callParams);
    13951 
    13952  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13953  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
    13954  allocation);
    13955  Flush();
    13956 }
    13957 
    13958 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
    13959  VmaAllocation allocation)
    13960 {
    13961  CallParams callParams;
    13962  GetBasicParams(callParams);
    13963 
    13964  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13965  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
    13966  allocation);
    13967  Flush();
    13968 }
    13969 
    13970 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
    13971  VmaAllocation allocation)
    13972 {
    13973  CallParams callParams;
    13974  GetBasicParams(callParams);
    13975 
    13976  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13977  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
    13978  allocation);
    13979  Flush();
    13980 }
    13981 
    13982 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
    13983  VmaPool pool)
    13984 {
    13985  CallParams callParams;
    13986  GetBasicParams(callParams);
    13987 
    13988  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    13989  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
    13990  pool);
    13991  Flush();
    13992 }
    13993 
    13994 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
    13995  const VmaDefragmentationInfo2& info,
13996  VmaDefragmentationContext ctx)
13997 {
    13998  CallParams callParams;
    13999  GetBasicParams(callParams);
    14000 
    14001  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14002  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
    14003  info.flags);
    14004  PrintPointerList(info.allocationCount, info.pAllocations);
    14005  fprintf(m_File, ",");
    14006  PrintPointerList(info.poolCount, info.pPools);
    14007  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
14008  info.maxCpuBytesToMove,
14009  info.maxCpuAllocationsToMove,
14010  info.maxGpuBytesToMove,
14011  info.maxGpuAllocationsToMove,
14012  info.commandBuffer,
    14013  ctx);
    14014  Flush();
    14015 }
    14016 
    14017 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
14018  VmaDefragmentationContext ctx)
14019 {
    14020  CallParams callParams;
    14021  GetBasicParams(callParams);
    14022 
    14023  VmaMutexLock lock(m_FileMutex, m_UseMutex);
    14024  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
    14025  ctx);
    14026  Flush();
    14027 }
    14028 
    14029 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
    14030 {
    14031  if(pUserData != VMA_NULL)
    14032  {
    14033  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
    14034  {
    14035  m_Str = (const char*)pUserData;
    14036  }
    14037  else
    14038  {
    14039  sprintf_s(m_PtrStr, "%p", pUserData);
    14040  m_Str = m_PtrStr;
    14041  }
    14042  }
    14043  else
    14044  {
    14045  m_Str = "";
    14046  }
    14047 }
    14048 
    14049 void VmaRecorder::WriteConfiguration(
    14050  const VkPhysicalDeviceProperties& devProps,
    14051  const VkPhysicalDeviceMemoryProperties& memProps,
    14052  bool dedicatedAllocationExtensionEnabled)
    14053 {
    14054  fprintf(m_File, "Config,Begin\n");
    14055 
    14056  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
    14057  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
    14058  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
    14059  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
    14060  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
    14061  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
    14062 
    14063  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
    14064  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
    14065  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
    14066 
    14067  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
    14068  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
    14069  {
    14070  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
    14071  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
    14072  }
    14073  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
    14074  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
    14075  {
    14076  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
    14077  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
    14078  }
    14079 
    14080  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
    14081 
    14082  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
    14083  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
    14084  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
    14085  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
    14086  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
    14087  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
    14088  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
    14089  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
    14090  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14091 
    14092  fprintf(m_File, "Config,End\n");
    14093 }
    14094 
    14095 void VmaRecorder::GetBasicParams(CallParams& outParams)
    14096 {
    14097  outParams.threadId = GetCurrentThreadId();
    14098 
    14099  LARGE_INTEGER counter;
    14100  QueryPerformanceCounter(&counter);
    14101  outParams.time = (double)(counter.QuadPart - m_StartCounter) / (double)m_Freq;
    14102 }
    14103 
    14104 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
    14105 {
    14106  if(count)
    14107  {
    14108  fprintf(m_File, "%p", pItems[0]);
    14109  for(uint64_t i = 1; i < count; ++i)
    14110  {
    14111  fprintf(m_File, " %p", pItems[i]);
    14112  }
    14113  }
    14114 }
    14115 
    14116 void VmaRecorder::Flush()
    14117 {
    14118  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
    14119  {
    14120  fflush(m_File);
    14121  }
    14122 }
    14123 
    14124 #endif // #if VMA_RECORDING_ENABLED
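/* Editorial usage sketch -- a minimal way to enable this recorder, assuming the
library was compiled with VMA_RECORDING_ENABLED defined to 1 (the file path is
hypothetical):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // fflush() after every call
    recordSettings.pFilePath = "vma_calls.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.pRecordSettings = &recordSettings;
    vmaCreateAllocator(&allocatorInfo, &allocator);

If recording support was compiled out, VmaAllocator_T::Init() below fails with
VK_ERROR_FEATURE_NOT_PRESENT instead of silently ignoring pRecordSettings.
*/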
    14125 
14126 ////////////////////////////////////////////////////////////////////////////////
14127 // VmaAllocationObjectAllocator
    14128 
    14129 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
    14130  m_Allocator(pAllocationCallbacks, 1024)
    14131 {
    14132 }
    14133 
    14134 VmaAllocation VmaAllocationObjectAllocator::Allocate()
    14135 {
    14136  VmaMutexLock mutexLock(m_Mutex);
    14137  return m_Allocator.Alloc();
    14138 }
    14139 
    14140 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
    14141 {
    14142  VmaMutexLock mutexLock(m_Mutex);
    14143  m_Allocator.Free(hAlloc);
    14144 }
    14145 
14146 ////////////////////////////////////////////////////////////////////////////////
14147 // VmaAllocator_T
    14148 
    14149 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
    14150  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
    14151  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
    14152  m_hDevice(pCreateInfo->device),
    14153  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
    14154  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
    14155  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
    14156  m_AllocationObjectAllocator(&m_AllocationCallbacks),
    14157  m_PreferredLargeHeapBlockSize(0),
    14158  m_PhysicalDevice(pCreateInfo->physicalDevice),
    14159  m_CurrentFrameIndex(0),
    14160  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
    14161  m_NextPoolId(0)
14162 #if VMA_RECORDING_ENABLED
14163  ,m_pRecorder(VMA_NULL)
    14164 #endif
    14165 {
    14166  if(VMA_DEBUG_DETECT_CORRUPTION)
    14167  {
14168  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
    14169  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
    14170  }
    14171 
    14172  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device);
    14173 
    14174 #if !(VMA_DEDICATED_ALLOCATION)
14175  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
14176  {
    14177  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
    14178  }
    14179 #endif
    14180 
14181  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
    14182  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
    14183  memset(&m_MemProps, 0, sizeof(m_MemProps));
    14184 
    14185  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
    14186  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
    14187 
    14188  for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14189  {
    14190  m_HeapSizeLimit[i] = VK_WHOLE_SIZE;
    14191  }
    14192 
    14193  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
    14194  {
    14195  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
    14196  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
    14197  }
    14198 
    14199  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
    14200 
    14201  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
    14202  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
    14203 
    14204  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
    14205  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
    14206  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
    14207  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
    14208 
    14209  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
    14210  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
    14211 
    14212  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
    14213  {
    14214  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
    14215  {
    14216  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
    14217  if(limit != VK_WHOLE_SIZE)
    14218  {
    14219  m_HeapSizeLimit[heapIndex] = limit;
    14220  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
    14221  {
    14222  m_MemProps.memoryHeaps[heapIndex].size = limit;
    14223  }
    14224  }
    14225  }
    14226  }
    14227 
    14228  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14229  {
    14230  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
    14231 
    14232  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
    14233  this,
    14234  VK_NULL_HANDLE, // hParentPool
    14235  memTypeIndex,
    14236  preferredBlockSize,
    14237  0,
    14238  SIZE_MAX,
    14239  GetBufferImageGranularity(),
    14240  pCreateInfo->frameInUseCount,
    14241  false, // isCustomPool
    14242  false, // explicitBlockSize
    14243  false); // linearAlgorithm
14244  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
14245  // because minBlockCount is 0.
    14246  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
    14247 
    14248  }
    14249 }
    14250 
    14251 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
    14252 {
    14253  VkResult res = VK_SUCCESS;
    14254 
    14255  if(pCreateInfo->pRecordSettings != VMA_NULL &&
    14256  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
    14257  {
    14258 #if VMA_RECORDING_ENABLED
    14259  m_pRecorder = vma_new(this, VmaRecorder)();
    14260  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
    14261  if(res != VK_SUCCESS)
    14262  {
    14263  return res;
    14264  }
    14265  m_pRecorder->WriteConfiguration(
    14266  m_PhysicalDeviceProperties,
    14267  m_MemProps,
    14268  m_UseKhrDedicatedAllocation);
    14269  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
    14270 #else
    14271  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
    14272  return VK_ERROR_FEATURE_NOT_PRESENT;
    14273 #endif
    14274  }
    14275 
    14276  return res;
    14277 }
    14278 
    14279 VmaAllocator_T::~VmaAllocator_T()
    14280 {
    14281 #if VMA_RECORDING_ENABLED
    14282  if(m_pRecorder != VMA_NULL)
    14283  {
    14284  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
    14285  vma_delete(this, m_pRecorder);
    14286  }
    14287 #endif
    14288 
    14289  VMA_ASSERT(m_Pools.empty());
    14290 
    14291  for(size_t i = GetMemoryTypeCount(); i--; )
    14292  {
    14293  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
    14294  {
    14295  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
    14296  }
    14297 
    14298  vma_delete(this, m_pDedicatedAllocations[i]);
    14299  vma_delete(this, m_pBlockVectors[i]);
    14300  }
    14301 }
    14302 
    14303 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
    14304 {
    14305 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14306  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
    14307  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
    14308  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
    14309  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
    14310  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
    14311  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
    14312  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
    14313  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
    14314  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
    14315  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
    14316  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
    14317  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
    14318  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
    14319  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
    14320  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
    14321  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
    14322  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
    14323 #if VMA_DEDICATED_ALLOCATION
    14324  if(m_UseKhrDedicatedAllocation)
    14325  {
    14326  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR =
    14327  (PFN_vkGetBufferMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetBufferMemoryRequirements2KHR");
    14328  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR =
    14329  (PFN_vkGetImageMemoryRequirements2KHR)vkGetDeviceProcAddr(m_hDevice, "vkGetImageMemoryRequirements2KHR");
    14330  }
    14331 #endif // #if VMA_DEDICATED_ALLOCATION
    14332 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
    14333 
    14334 #define VMA_COPY_IF_NOT_NULL(funcName) \
    14335  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
    14336 
    14337  if(pVulkanFunctions != VMA_NULL)
    14338  {
    14339  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
    14340  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
    14341  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
    14342  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
    14343  VMA_COPY_IF_NOT_NULL(vkMapMemory);
    14344  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
    14345  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
    14346  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
    14347  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
    14348  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
    14349  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
    14350  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
    14351  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
    14352  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
    14353  VMA_COPY_IF_NOT_NULL(vkCreateImage);
    14354  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
    14355  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
    14356 #if VMA_DEDICATED_ALLOCATION
    14357  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
    14358  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
    14359 #endif
    14360  }
    14361 
    14362 #undef VMA_COPY_IF_NOT_NULL
    14363 
    14364  // If these asserts are hit, you must either #define VMA_STATIC_VULKAN_FUNCTIONS 1
    14365  // or pass valid pointers as VmaAllocatorCreateInfo::pVulkanFunctions.
    14366  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
    14367  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
    14368  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
    14369  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
    14370  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
    14371  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
    14372  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
    14373  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
    14374  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
    14375  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
    14376  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
    14377  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
    14378  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
    14379  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
    14380  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
    14381  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
    14382  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
    14383 #if VMA_DEDICATED_ALLOCATION
    14384  if(m_UseKhrDedicatedAllocation)
    14385  {
    14386  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
    14387  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
    14388  }
    14389 #endif
    14390 }
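/* Editorial usage sketch -- when VMA_STATIC_VULKAN_FUNCTIONS is 0 (e.g. when
entry points come from a loader such as volk), the caller must provide the
pointers that the asserts above check for. Only a few members are shown; the
full list matches the VMA_COPY_IF_NOT_NULL block above:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    // ...and so on for the remaining members.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/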
    14391 
    14392 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
    14393 {
    14394  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14395  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
    14396  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
    14397  return isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize;
    14398 }
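/* Editorial example: with the default macro values (VMA_SMALL_HEAP_MAX_SIZE =
1 GiB, VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB), a 512 MiB heap counts as
small and gets 512 / 8 = 64 MiB blocks, while an 8 GiB heap gets the preferred
256 MiB block size. */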
    14399 
    14400 VkResult VmaAllocator_T::AllocateMemoryOfType(
    14401  VkDeviceSize size,
    14402  VkDeviceSize alignment,
    14403  bool dedicatedAllocation,
    14404  VkBuffer dedicatedBuffer,
    14405  VkImage dedicatedImage,
    14406  const VmaAllocationCreateInfo& createInfo,
    14407  uint32_t memTypeIndex,
    14408  VmaSuballocationType suballocType,
    14409  size_t allocationCount,
    14410  VmaAllocation* pAllocations)
    14411 {
    14412  VMA_ASSERT(pAllocations != VMA_NULL);
    14413  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
    14414 
    14415  VmaAllocationCreateInfo finalCreateInfo = createInfo;
    14416 
    14417  // If memory type is not HOST_VISIBLE, disable MAPPED.
    14418  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
    14419  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    14420  {
    14421  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
    14422  }
    14423 
    14424  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
    14425  VMA_ASSERT(blockVector);
    14426 
    14427  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
    14428  bool preferDedicatedMemory =
    14429  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
    14430  dedicatedAllocation ||
14431  // Heuristic: Allocate dedicated memory if the requested size is greater than half of the preferred block size.
    14432  size > preferredBlockSize / 2;
    14433 
    14434  if(preferDedicatedMemory &&
    14435  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
    14436  finalCreateInfo.pool == VK_NULL_HANDLE)
    14437  {
14438  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
14439  }
    14440 
    14441  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
    14442  {
    14443  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14444  {
    14445  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14446  }
    14447  else
    14448  {
    14449  return AllocateDedicatedMemory(
    14450  size,
    14451  suballocType,
    14452  memTypeIndex,
    14453  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14454  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14455  finalCreateInfo.pUserData,
    14456  dedicatedBuffer,
    14457  dedicatedImage,
    14458  allocationCount,
    14459  pAllocations);
    14460  }
    14461  }
    14462  else
    14463  {
    14464  VkResult res = blockVector->Allocate(
    14465  m_CurrentFrameIndex.load(),
    14466  size,
    14467  alignment,
    14468  finalCreateInfo,
    14469  suballocType,
    14470  allocationCount,
    14471  pAllocations);
    14472  if(res == VK_SUCCESS)
    14473  {
    14474  return res;
    14475  }
    14476 
14477  // Try dedicated memory.
    14478  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14479  {
    14480  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14481  }
    14482  else
    14483  {
    14484  res = AllocateDedicatedMemory(
    14485  size,
    14486  suballocType,
    14487  memTypeIndex,
    14488  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
    14489  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
    14490  finalCreateInfo.pUserData,
    14491  dedicatedBuffer,
    14492  dedicatedImage,
    14493  allocationCount,
    14494  pAllocations);
    14495  if(res == VK_SUCCESS)
    14496  {
14497  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
    14498  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
    14499  return VK_SUCCESS;
    14500  }
    14501  else
    14502  {
    14503  // Everything failed: Return error code.
    14504  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14505  return res;
    14506  }
    14507  }
    14508  }
    14509 }
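/* Editorial example of the heuristic above: with the default 256 MiB preferred
block size, a 160 MiB request exceeds 128 MiB (half a block) and is promoted to
a dedicated allocation, unless NEVER_ALLOCATE is set or a custom pool is used. */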
    14510 
    14511 VkResult VmaAllocator_T::AllocateDedicatedMemory(
    14512  VkDeviceSize size,
    14513  VmaSuballocationType suballocType,
    14514  uint32_t memTypeIndex,
    14515  bool map,
    14516  bool isUserDataString,
    14517  void* pUserData,
    14518  VkBuffer dedicatedBuffer,
    14519  VkImage dedicatedImage,
    14520  size_t allocationCount,
    14521  VmaAllocation* pAllocations)
    14522 {
    14523  VMA_ASSERT(allocationCount > 0 && pAllocations);
    14524 
    14525  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    14526  allocInfo.memoryTypeIndex = memTypeIndex;
    14527  allocInfo.allocationSize = size;
    14528 
    14529 #if VMA_DEDICATED_ALLOCATION
    14530  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
    14531  if(m_UseKhrDedicatedAllocation)
    14532  {
    14533  if(dedicatedBuffer != VK_NULL_HANDLE)
    14534  {
    14535  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
    14536  dedicatedAllocInfo.buffer = dedicatedBuffer;
    14537  allocInfo.pNext = &dedicatedAllocInfo;
    14538  }
    14539  else if(dedicatedImage != VK_NULL_HANDLE)
    14540  {
    14541  dedicatedAllocInfo.image = dedicatedImage;
    14542  allocInfo.pNext = &dedicatedAllocInfo;
    14543  }
    14544  }
    14545 #endif // #if VMA_DEDICATED_ALLOCATION
    14546 
    14547  size_t allocIndex;
    14548  VkResult res = VK_SUCCESS;
    14549  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14550  {
    14551  res = AllocateDedicatedMemoryPage(
    14552  size,
    14553  suballocType,
    14554  memTypeIndex,
    14555  allocInfo,
    14556  map,
    14557  isUserDataString,
    14558  pUserData,
    14559  pAllocations + allocIndex);
    14560  if(res != VK_SUCCESS)
    14561  {
    14562  break;
    14563  }
    14564  }
    14565 
    14566  if(res == VK_SUCCESS)
    14567  {
    14568  // Register them in m_pDedicatedAllocations.
    14569  {
    14570  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14571  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    14572  VMA_ASSERT(pDedicatedAllocations);
    14573  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
    14574  {
    14575  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
    14576  }
    14577  }
    14578 
    14579  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
    14580  }
    14581  else
    14582  {
    14583  // Free all already created allocations.
    14584  while(allocIndex--)
    14585  {
    14586  VmaAllocation currAlloc = pAllocations[allocIndex];
    14587  VkDeviceMemory hMemory = currAlloc->GetMemory();
    14588 
    14589  /*
14590  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    14591  before vkFreeMemory.
    14592 
    14593  if(currAlloc->GetMappedData() != VMA_NULL)
    14594  {
    14595  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    14596  }
    14597  */
    14598 
    14599  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
    14600 
    14601  currAlloc->SetUserData(this, VMA_NULL);
    14602  currAlloc->Dtor();
    14603  m_AllocationObjectAllocator.Free(currAlloc);
    14604  }
    14605 
    14606  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14607  }
    14608 
    14609  return res;
    14610 }
    14611 
    14612 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
    14613  VkDeviceSize size,
    14614  VmaSuballocationType suballocType,
    14615  uint32_t memTypeIndex,
    14616  const VkMemoryAllocateInfo& allocInfo,
    14617  bool map,
    14618  bool isUserDataString,
    14619  void* pUserData,
    14620  VmaAllocation* pAllocation)
    14621 {
    14622  VkDeviceMemory hMemory = VK_NULL_HANDLE;
    14623  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
    14624  if(res < 0)
    14625  {
    14626  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
    14627  return res;
    14628  }
    14629 
    14630  void* pMappedData = VMA_NULL;
    14631  if(map)
    14632  {
    14633  res = (*m_VulkanFunctions.vkMapMemory)(
    14634  m_hDevice,
    14635  hMemory,
    14636  0,
    14637  VK_WHOLE_SIZE,
    14638  0,
    14639  &pMappedData);
    14640  if(res < 0)
    14641  {
    14642  VMA_DEBUG_LOG(" vkMapMemory FAILED");
    14643  FreeVulkanMemory(memTypeIndex, size, hMemory);
    14644  return res;
    14645  }
    14646  }
    14647 
    14648  *pAllocation = m_AllocationObjectAllocator.Allocate();
    14649  (*pAllocation)->Ctor(m_CurrentFrameIndex.load(), isUserDataString);
    14650  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
    14651  (*pAllocation)->SetUserData(this, pUserData);
    14652  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14653  {
    14654  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
    14655  }
    14656 
    14657  return VK_SUCCESS;
    14658 }
    14659 
    14660 void VmaAllocator_T::GetBufferMemoryRequirements(
    14661  VkBuffer hBuffer,
    14662  VkMemoryRequirements& memReq,
    14663  bool& requiresDedicatedAllocation,
    14664  bool& prefersDedicatedAllocation) const
    14665 {
    14666 #if VMA_DEDICATED_ALLOCATION
    14667  if(m_UseKhrDedicatedAllocation)
    14668  {
    14669  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14670  memReqInfo.buffer = hBuffer;
    14671 
    14672  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14673 
    14674  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14675  memReq2.pNext = &memDedicatedReq;
    14676 
    14677  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14678 
    14679  memReq = memReq2.memoryRequirements;
    14680  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14681  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14682  }
    14683  else
    14684 #endif // #if VMA_DEDICATED_ALLOCATION
    14685  {
    14686  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
    14687  requiresDedicatedAllocation = false;
    14688  prefersDedicatedAllocation = false;
    14689  }
    14690 }
    14691 
    14692 void VmaAllocator_T::GetImageMemoryRequirements(
    14693  VkImage hImage,
    14694  VkMemoryRequirements& memReq,
    14695  bool& requiresDedicatedAllocation,
    14696  bool& prefersDedicatedAllocation) const
    14697 {
    14698 #if VMA_DEDICATED_ALLOCATION
    14699  if(m_UseKhrDedicatedAllocation)
    14700  {
    14701  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
    14702  memReqInfo.image = hImage;
    14703 
    14704  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
    14705 
    14706  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
    14707  memReq2.pNext = &memDedicatedReq;
    14708 
    14709  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
    14710 
    14711  memReq = memReq2.memoryRequirements;
    14712  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
    14713  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
    14714  }
    14715  else
    14716 #endif // #if VMA_DEDICATED_ALLOCATION
    14717  {
    14718  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
    14719  requiresDedicatedAllocation = false;
    14720  prefersDedicatedAllocation = false;
    14721  }
    14722 }
    14723 
    14724 VkResult VmaAllocator_T::AllocateMemory(
    14725  const VkMemoryRequirements& vkMemReq,
    14726  bool requiresDedicatedAllocation,
    14727  bool prefersDedicatedAllocation,
    14728  VkBuffer dedicatedBuffer,
    14729  VkImage dedicatedImage,
    14730  const VmaAllocationCreateInfo& createInfo,
    14731  VmaSuballocationType suballocType,
    14732  size_t allocationCount,
    14733  VmaAllocation* pAllocations)
    14734 {
    14735  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
    14736 
    14737  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
    14738 
    14739  if(vkMemReq.size == 0)
    14740  {
    14741  return VK_ERROR_VALIDATION_FAILED_EXT;
    14742  }
    14743  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
    14744  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14745  {
    14746  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
    14747  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14748  }
    14749  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
14750  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
14751  {
    14752  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
    14753  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14754  }
    14755  if(requiresDedicatedAllocation)
    14756  {
    14757  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
    14758  {
    14759  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
    14760  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14761  }
    14762  if(createInfo.pool != VK_NULL_HANDLE)
    14763  {
    14764  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
    14765  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14766  }
    14767  }
    14768  if((createInfo.pool != VK_NULL_HANDLE) &&
    14769  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
    14770  {
    14771  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
    14772  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14773  }
    14774 
    14775  if(createInfo.pool != VK_NULL_HANDLE)
    14776  {
    14777  const VkDeviceSize alignmentForPool = VMA_MAX(
    14778  vkMemReq.alignment,
    14779  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
    14780  return createInfo.pool->m_BlockVector.Allocate(
    14781  m_CurrentFrameIndex.load(),
    14782  vkMemReq.size,
    14783  alignmentForPool,
    14784  createInfo,
    14785  suballocType,
    14786  allocationCount,
    14787  pAllocations);
    14788  }
    14789  else
    14790  {
14791  // Bit mask of Vulkan memory types acceptable for this allocation.
    14792  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    14793  uint32_t memTypeIndex = UINT32_MAX;
    14794  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14795  if(res == VK_SUCCESS)
    14796  {
    14797  VkDeviceSize alignmentForMemType = VMA_MAX(
    14798  vkMemReq.alignment,
    14799  GetMemoryTypeMinAlignment(memTypeIndex));
    14800 
    14801  res = AllocateMemoryOfType(
    14802  vkMemReq.size,
    14803  alignmentForMemType,
    14804  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14805  dedicatedBuffer,
    14806  dedicatedImage,
    14807  createInfo,
    14808  memTypeIndex,
    14809  suballocType,
    14810  allocationCount,
    14811  pAllocations);
    14812  // Succeeded on first try.
    14813  if(res == VK_SUCCESS)
    14814  {
    14815  return res;
    14816  }
    14817  // Allocation from this memory type failed. Try other compatible memory types.
    14818  else
    14819  {
    14820  for(;;)
    14821  {
    14822  // Remove old memTypeIndex from list of possibilities.
    14823  memoryTypeBits &= ~(1u << memTypeIndex);
    14824  // Find alternative memTypeIndex.
    14825  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
    14826  if(res == VK_SUCCESS)
    14827  {
    14828  alignmentForMemType = VMA_MAX(
    14829  vkMemReq.alignment,
    14830  GetMemoryTypeMinAlignment(memTypeIndex));
    14831 
    14832  res = AllocateMemoryOfType(
    14833  vkMemReq.size,
    14834  alignmentForMemType,
    14835  requiresDedicatedAllocation || prefersDedicatedAllocation,
    14836  dedicatedBuffer,
    14837  dedicatedImage,
    14838  createInfo,
    14839  memTypeIndex,
    14840  suballocType,
    14841  allocationCount,
    14842  pAllocations);
    14843  // Allocation from this alternative memory type succeeded.
    14844  if(res == VK_SUCCESS)
    14845  {
    14846  return res;
    14847  }
    14848  // else: Allocation from this memory type failed. Try next one - next loop iteration.
    14849  }
    14850  // No other matching memory type index could be found.
    14851  else
    14852  {
    14853  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
    14854  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    14855  }
    14856  }
    14857  }
    14858  }
14859  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
    14860  else
    14861  return res;
    14862  }
    14863 }
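/* Editorial example: the validation above rejects contradictory requests early.
This hypothetical createInfo trips the first assert and returns
VK_ERROR_OUT_OF_DEVICE_MEMORY in release builds:

    VmaAllocationCreateInfo createInfo = {};
    createInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
        VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT; // mutually exclusive
*/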
    14864 
    14865 void VmaAllocator_T::FreeMemory(
    14866  size_t allocationCount,
    14867  const VmaAllocation* pAllocations)
    14868 {
    14869  VMA_ASSERT(pAllocations);
    14870 
    14871  for(size_t allocIndex = allocationCount; allocIndex--; )
    14872  {
    14873  VmaAllocation allocation = pAllocations[allocIndex];
    14874 
    14875  if(allocation != VK_NULL_HANDLE)
    14876  {
    14877  if(TouchAllocation(allocation))
    14878  {
    14879  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
    14880  {
    14881  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
    14882  }
    14883 
    14884  switch(allocation->GetType())
    14885  {
    14886  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14887  {
    14888  VmaBlockVector* pBlockVector = VMA_NULL;
    14889  VmaPool hPool = allocation->GetBlock()->GetParentPool();
    14890  if(hPool != VK_NULL_HANDLE)
    14891  {
    14892  pBlockVector = &hPool->m_BlockVector;
    14893  }
    14894  else
    14895  {
    14896  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    14897  pBlockVector = m_pBlockVectors[memTypeIndex];
    14898  }
    14899  pBlockVector->Free(allocation);
    14900  }
    14901  break;
    14902  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14903  FreeDedicatedMemory(allocation);
    14904  break;
    14905  default:
    14906  VMA_ASSERT(0);
    14907  }
    14908  }
    14909 
    14910  allocation->SetUserData(this, VMA_NULL);
    14911  allocation->Dtor();
    14912  m_AllocationObjectAllocator.Free(allocation);
    14913  }
    14914  }
    14915 }
    14916 
    14917 VkResult VmaAllocator_T::ResizeAllocation(
    14918  const VmaAllocation alloc,
    14919  VkDeviceSize newSize)
    14920 {
    14921  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
    14922  {
    14923  return VK_ERROR_VALIDATION_FAILED_EXT;
    14924  }
    14925  if(newSize == alloc->GetSize())
    14926  {
    14927  return VK_SUCCESS;
    14928  }
    14929 
    14930  switch(alloc->GetType())
    14931  {
    14932  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    14933  return VK_ERROR_FEATURE_NOT_PRESENT;
    14934  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    14935  if(alloc->GetBlock()->m_pMetadata->ResizeAllocation(alloc, newSize))
    14936  {
    14937  alloc->ChangeSize(newSize);
    14938  VMA_HEAVY_ASSERT(alloc->GetBlock()->m_pMetadata->Validate());
    14939  return VK_SUCCESS;
    14940  }
    14941  else
    14942  {
    14943  return VK_ERROR_OUT_OF_POOL_MEMORY;
    14944  }
    14945  default:
    14946  VMA_ASSERT(0);
    14947  return VK_ERROR_VALIDATION_FAILED_EXT;
    14948  }
    14949 }
    14950 
    14951 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
    14952 {
    14953  // Initialize.
    14954  InitStatInfo(pStats->total);
    14955  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
    14956  InitStatInfo(pStats->memoryType[i]);
    14957  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
    14958  InitStatInfo(pStats->memoryHeap[i]);
    14959 
    14960  // Process default pools.
    14961  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14962  {
    14963  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    14964  VMA_ASSERT(pBlockVector);
    14965  pBlockVector->AddStats(pStats);
    14966  }
    14967 
    14968  // Process custom pools.
    14969  {
    14970  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    14971  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    14972  {
    14973  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
    14974  }
    14975  }
    14976 
    14977  // Process dedicated allocations.
    14978  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    14979  {
    14980  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
    14981  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    14982  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    14983  VMA_ASSERT(pDedicatedAllocVector);
    14984  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
    14985  {
    14986  VmaStatInfo allocationStatInfo;
    14987  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
    14988  VmaAddStatInfo(pStats->total, allocationStatInfo);
    14989  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
    14990  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
    14991  }
    14992  }
    14993 
    14994  // Postprocess.
    14995  VmaPostprocessCalcStatInfo(pStats->total);
    14996  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
    14997  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
    14998  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
    14999  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
    15000 }
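/* Usage sketch (editorial addition, not part of vk_mem_alloc.h): how an
application might query the global statistics computed above, through the
public wrapper vmaCalculateStats. `allocator` is assumed to be a valid
VmaAllocator created earlier.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used bytes: %llu, unused bytes: %llu\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes);
*/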
    15001 
    15002 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
    15003 
    15004 VkResult VmaAllocator_T::DefragmentationBegin(
    15005  const VmaDefragmentationInfo2& info,
    15006  VmaDefragmentationStats* pStats,
    15007  VmaDefragmentationContext* pContext)
    15008 {
    15009  if(info.pAllocationsChanged != VMA_NULL)
    15010  {
    15011  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
    15012  }
    15013 
    15014  *pContext = vma_new(this, VmaDefragmentationContext_T)(
    15015  this, m_CurrentFrameIndex.load(), info.flags, pStats);
    15016 
    15017  (*pContext)->AddPools(info.poolCount, info.pPools);
    15018  (*pContext)->AddAllocations(
    15020 
    15021  VkResult res = (*pContext)->Defragment(
    15022  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
    15023  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
    15024  info.commandBuffer, pStats);
    15025 
    15026  if(res != VK_NOT_READY)
    15027  {
    15028  vma_delete(this, *pContext);
    15029  *pContext = VMA_NULL;
    15030  }
    15031 
    15032  return res;
    15033 }
    15034 
    15035 VkResult VmaAllocator_T::DefragmentationEnd(
    15036  VmaDefragmentationContext context)
    15037 {
    15038  vma_delete(this, context);
    15039  return VK_SUCCESS;
    15040 }
    15041 
    15042 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
    15043 {
    15044  if(hAllocation->CanBecomeLost())
    15045  {
    15046  /*
    15047  Warning: This is a carefully designed algorithm.
    15048  Do not modify unless you really know what you're doing :)
    15049  */
    15050  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15051  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15052  for(;;)
    15053  {
    15054  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15055  {
    15056  pAllocationInfo->memoryType = UINT32_MAX;
    15057  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
    15058  pAllocationInfo->offset = 0;
    15059  pAllocationInfo->size = hAllocation->GetSize();
    15060  pAllocationInfo->pMappedData = VMA_NULL;
    15061  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15062  return;
    15063  }
    15064  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15065  {
    15066  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15067  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15068  pAllocationInfo->offset = hAllocation->GetOffset();
    15069  pAllocationInfo->size = hAllocation->GetSize();
    15070  pAllocationInfo->pMappedData = VMA_NULL;
    15071  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15072  return;
    15073  }
    15074  else // Last use time earlier than current time.
    15075  {
    15076  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15077  {
    15078  localLastUseFrameIndex = localCurrFrameIndex;
    15079  }
    15080  }
    15081  }
    15082  }
    15083  else
    15084  {
    15085 #if VMA_STATS_STRING_ENABLED
    15086  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15087  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15088  for(;;)
    15089  {
    15090  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15091  if(localLastUseFrameIndex == localCurrFrameIndex)
    15092  {
    15093  break;
    15094  }
    15095  else // Last use time earlier than current time.
    15096  {
    15097  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15098  {
    15099  localLastUseFrameIndex = localCurrFrameIndex;
    15100  }
    15101  }
    15102  }
    15103 #endif
    15104 
    15105  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
    15106  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
    15107  pAllocationInfo->offset = hAllocation->GetOffset();
    15108  pAllocationInfo->size = hAllocation->GetSize();
    15109  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
    15110  pAllocationInfo->pUserData = hAllocation->GetUserData();
    15111  }
    15112 }
    15113 
    15114 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
    15115 {
    15116  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
    15117  if(hAllocation->CanBecomeLost())
    15118  {
    15119  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15120  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15121  for(;;)
    15122  {
    15123  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
    15124  {
    15125  return false;
    15126  }
    15127  else if(localLastUseFrameIndex == localCurrFrameIndex)
    15128  {
    15129  return true;
    15130  }
    15131  else // Last use time earlier than current time.
    15132  {
    15133  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15134  {
    15135  localLastUseFrameIndex = localCurrFrameIndex;
    15136  }
    15137  }
    15138  }
    15139  }
    15140  else
    15141  {
    15142 #if VMA_STATS_STRING_ENABLED
    15143  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
    15144  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
    15145  for(;;)
    15146  {
    15147  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
    15148  if(localLastUseFrameIndex == localCurrFrameIndex)
    15149  {
    15150  break;
    15151  }
    15152  else // Last use time earlier than current time.
    15153  {
    15154  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
    15155  {
    15156  localLastUseFrameIndex = localCurrFrameIndex;
    15157  }
    15158  }
    15159  }
    15160 #endif
    15161 
    15162  return true;
    15163  }
    15164 }
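/* Usage sketch (editorial addition): the public counterpart vmaTouchAllocation
returns whether an allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is still alive, and marks it as used
in the current frame via the compare-exchange loop above. A typical per-frame
pattern, assuming `allocator` and `alloc` are valid:

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Allocation was lost - destroy the old resource and recreate it.
    }
*/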
    15165 
    15166 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
    15167 {
    15168  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
    15169 
    15170  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
    15171 
    15172  if(newCreateInfo.maxBlockCount == 0)
    15173  {
    15174  newCreateInfo.maxBlockCount = SIZE_MAX;
    15175  }
    15176  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
    15177  {
    15178  return VK_ERROR_INITIALIZATION_FAILED;
    15179  }
    15180 
    15181  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
    15182 
    15183  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
    15184 
    15185  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
    15186  if(res != VK_SUCCESS)
    15187  {
    15188  vma_delete(this, *pPool);
    15189  *pPool = VMA_NULL;
    15190  return res;
    15191  }
    15192 
    15193  // Add to m_Pools.
    15194  {
    15195  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15196  (*pPool)->SetId(m_NextPoolId++);
    15197  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
    15198  }
    15199 
    15200  return VK_SUCCESS;
    15201 }
    15202 
    15203 void VmaAllocator_T::DestroyPool(VmaPool pool)
    15204 {
    15205  // Remove from m_Pools.
    15206  {
    15207  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
    15208  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
    15209  VMA_ASSERT(success && "Pool not found in Allocator.");
    15210  }
    15211 
    15212  vma_delete(this, pool);
    15213 }
    15214 
    15215 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
    15216 {
    15217  pool->m_BlockVector.GetPoolStats(pPoolStats);
    15218 }
    15219 
    15220 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
    15221 {
    15222  m_CurrentFrameIndex.store(frameIndex);
    15223 }
    15224 
    15225 void VmaAllocator_T::MakePoolAllocationsLost(
    15226  VmaPool hPool,
    15227  size_t* pLostAllocationCount)
    15228 {
    15229  hPool->m_BlockVector.MakePoolAllocationsLost(
    15230  m_CurrentFrameIndex.load(),
    15231  pLostAllocationCount);
    15232 }
    15233 
    15234 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
    15235 {
    15236  return hPool->m_BlockVector.CheckCorruption();
    15237 }
    15238 
    15239 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
    15240 {
    15241  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
    15242 
    15243  // Process default pools.
    15244  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15245  {
    15246  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
    15247  {
    15248  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
    15249  VMA_ASSERT(pBlockVector);
    15250  VkResult localRes = pBlockVector->CheckCorruption();
    15251  switch(localRes)
    15252  {
    15253  case VK_ERROR_FEATURE_NOT_PRESENT:
    15254  break;
    15255  case VK_SUCCESS:
    15256  finalRes = VK_SUCCESS;
    15257  break;
    15258  default:
    15259  return localRes;
    15260  }
    15261  }
    15262  }
    15263 
    15264  // Process custom pools.
    15265  {
    15266  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15267  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
    15268  {
    15269  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
    15270  {
    15271  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
    15272  switch(localRes)
    15273  {
    15274  case VK_ERROR_FEATURE_NOT_PRESENT:
    15275  break;
    15276  case VK_SUCCESS:
    15277  finalRes = VK_SUCCESS;
    15278  break;
    15279  default:
    15280  return localRes;
    15281  }
    15282  }
    15283  }
    15284  }
    15285 
    15286  return finalRes;
    15287 }
    15288 
    15289 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
    15290 {
    15291  *pAllocation = m_AllocationObjectAllocator.Allocate();
    15292  (*pAllocation)->Ctor(VMA_FRAME_INDEX_LOST, false);
    15293  (*pAllocation)->InitLost();
    15294 }
    15295 
    15296 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
    15297 {
    15298  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
    15299 
    15300  VkResult res;
    15301  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15302  {
    15303  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15304  if(m_HeapSizeLimit[heapIndex] >= pAllocateInfo->allocationSize)
    15305  {
    15306  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15307  if(res == VK_SUCCESS)
    15308  {
    15309  m_HeapSizeLimit[heapIndex] -= pAllocateInfo->allocationSize;
    15310  }
    15311  }
    15312  else
    15313  {
    15314  res = VK_ERROR_OUT_OF_DEVICE_MEMORY;
    15315  }
    15316  }
    15317  else
    15318  {
    15319  res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
    15320  }
    15321 
    15322  if(res == VK_SUCCESS && m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
    15323  {
    15324  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize);
    15325  }
    15326 
    15327  return res;
    15328 }
    15329 
    15330 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
    15331 {
    15332  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
    15333  {
    15334  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size);
    15335  }
    15336 
    15337  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
    15338 
    15339  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
    15340  if(m_HeapSizeLimit[heapIndex] != VK_WHOLE_SIZE)
    15341  {
    15342  VmaMutexLock lock(m_HeapSizeLimitMutex, m_UseMutex);
    15343  m_HeapSizeLimit[heapIndex] += size;
    15344  }
    15345 }
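/* Note (editorial addition): m_HeapSizeLimit used above is configured through
VmaAllocatorCreateInfo::pHeapSizeLimit. A sketch of capping heap 0 at 512 MiB
at allocator creation time; other fields of allocatorCreateInfo are assumed to
be filled in elsewhere:

    VkDeviceSize heapSizeLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapSizeLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means no limit.
    heapSizeLimits[0] = 512ull * 1024 * 1024;
    allocatorCreateInfo.pHeapSizeLimit = heapSizeLimits;
*/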
    15346 
    15347 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
    15348 {
    15349  if(hAllocation->CanBecomeLost())
    15350  {
    15351  return VK_ERROR_MEMORY_MAP_FAILED;
    15352  }
    15353 
    15354  switch(hAllocation->GetType())
    15355  {
    15356  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15357  {
    15358  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15359  char *pBytes = VMA_NULL;
    15360  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
    15361  if(res == VK_SUCCESS)
    15362  {
    15363  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
    15364  hAllocation->BlockAllocMap();
    15365  }
    15366  return res;
    15367  }
    15368  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15369  return hAllocation->DedicatedAllocMap(this, ppData);
    15370  default:
    15371  VMA_ASSERT(0);
    15372  return VK_ERROR_MEMORY_MAP_FAILED;
    15373  }
    15374 }
    15375 
    15376 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
    15377 {
    15378  switch(hAllocation->GetType())
    15379  {
    15380  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15381  {
    15382  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
    15383  hAllocation->BlockAllocUnmap();
    15384  pBlock->Unmap(this, 1);
    15385  }
    15386  break;
    15387  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15388  hAllocation->DedicatedAllocUnmap(this);
    15389  break;
    15390  default:
    15391  VMA_ASSERT(0);
    15392  }
    15393 }
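/* Usage sketch (editorial addition): the public vmaMapMemory/vmaUnmapMemory
pair built on these internals. Mapping is reference-counted per device memory
block (note the count argument of pBlock->Map above), so mapping several
allocations that share one block is safe:

    void* mappedData = VMA_NULL;
    if(vmaMapMemory(allocator, alloc, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcSize); // srcData/srcSize assumed
        vmaUnmapMemory(allocator, alloc);
    }
*/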
    15394 
    15395 VkResult VmaAllocator_T::BindBufferMemory(VmaAllocation hAllocation, VkBuffer hBuffer)
    15396 {
    15397  VkResult res = VK_SUCCESS;
    15398  switch(hAllocation->GetType())
    15399  {
    15400  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15401  res = GetVulkanFunctions().vkBindBufferMemory(
    15402  m_hDevice,
    15403  hBuffer,
    15404  hAllocation->GetMemory(),
    15405  0); //memoryOffset
    15406  break;
    15407  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15408  {
    15409  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15410  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
    15411  res = pBlock->BindBufferMemory(this, hAllocation, hBuffer);
    15412  break;
    15413  }
    15414  default:
    15415  VMA_ASSERT(0);
    15416  }
    15417  return res;
    15418 }
    15419 
    15420 VkResult VmaAllocator_T::BindImageMemory(VmaAllocation hAllocation, VkImage hImage)
    15421 {
    15422  VkResult res = VK_SUCCESS;
    15423  switch(hAllocation->GetType())
    15424  {
    15425  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15426  res = GetVulkanFunctions().vkBindImageMemory(
    15427  m_hDevice,
    15428  hImage,
    15429  hAllocation->GetMemory(),
    15430  0); //memoryOffset
    15431  break;
    15432  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15433  {
    15434  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
    15435  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
    15436  res = pBlock->BindImageMemory(this, hAllocation, hImage);
    15437  break;
    15438  }
    15439  default:
    15440  VMA_ASSERT(0);
    15441  }
    15442  return res;
    15443 }
    15444 
    15445 void VmaAllocator_T::FlushOrInvalidateAllocation(
    15446  VmaAllocation hAllocation,
    15447  VkDeviceSize offset, VkDeviceSize size,
    15448  VMA_CACHE_OPERATION op)
    15449 {
    15450  const uint32_t memTypeIndex = hAllocation->GetMemoryTypeIndex();
    15451  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
    15452  {
    15453  const VkDeviceSize allocationSize = hAllocation->GetSize();
    15454  VMA_ASSERT(offset <= allocationSize);
    15455 
    15456  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
    15457 
    15458  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
    15459  memRange.memory = hAllocation->GetMemory();
    15460 
    15461  switch(hAllocation->GetType())
    15462  {
    15463  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
    15464  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15465  if(size == VK_WHOLE_SIZE)
    15466  {
    15467  memRange.size = allocationSize - memRange.offset;
    15468  }
    15469  else
    15470  {
    15471  VMA_ASSERT(offset + size <= allocationSize);
    15472  memRange.size = VMA_MIN(
    15473  VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize),
    15474  allocationSize - memRange.offset);
    15475  }
    15476  break;
    15477 
    15478  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
    15479  {
    15480  // 1. Still within this allocation.
    15481  memRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
    15482  if(size == VK_WHOLE_SIZE)
    15483  {
    15484  size = allocationSize - offset;
    15485  }
    15486  else
    15487  {
    15488  VMA_ASSERT(offset + size <= allocationSize);
    15489  }
    15490  memRange.size = VmaAlignUp(size + (offset - memRange.offset), nonCoherentAtomSize);
    15491 
    15492  // 2. Adjust to whole block.
    15493  const VkDeviceSize allocationOffset = hAllocation->GetOffset();
    15494  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
    15495  const VkDeviceSize blockSize = hAllocation->GetBlock()->m_pMetadata->GetSize();
    15496  memRange.offset += allocationOffset;
    15497  memRange.size = VMA_MIN(memRange.size, blockSize - memRange.offset);
    15498 
    15499  break;
    15500  }
    15501 
    15502  default:
    15503  VMA_ASSERT(0);
    15504  }
    15505 
    15506  switch(op)
    15507  {
    15508  case VMA_CACHE_FLUSH:
    15509  (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15510  break;
    15511  case VMA_CACHE_INVALIDATE:
    15512  (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
    15513  break;
    15514  default:
    15515  VMA_ASSERT(0);
    15516  }
    15517  }
    15518  // else: Just ignore this call.
    15519 }
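/* Usage sketch (editorial addition): for a non-HOST_COHERENT memory type, a
write through a mapped pointer should be followed by a flush. offset and size
are relative to the given allocation; the rounding to nonCoherentAtomSize and
the translation to the block-level offset happen inside, as seen above:

    memcpy((char*)mappedData + dstOffset, srcData, srcSize);
    vmaFlushAllocation(allocator, alloc, dstOffset, srcSize);
    // Or simply flush everything:
    // vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
*/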
    15520 
    15521 void VmaAllocator_T::FreeDedicatedMemory(VmaAllocation allocation)
    15522 {
    15523  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
    15524 
    15525  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
    15526  {
    15527  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15528  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
    15529  VMA_ASSERT(pDedicatedAllocations);
    15530  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
    15531  VMA_ASSERT(success);
    15532  }
    15533 
    15534  VkDeviceMemory hMemory = allocation->GetMemory();
    15535 
    15536  /*
    15537  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
    15538  before vkFreeMemory.
    15539 
    15540  if(allocation->GetMappedData() != VMA_NULL)
    15541  {
    15542  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
    15543  }
    15544  */
    15545 
    15546  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
    15547 
    15548  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
    15549 }
    15550 
    15551 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
    15552 {
    15553  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
    15554  !hAllocation->CanBecomeLost() &&
    15555  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15556  {
    15557  void* pData = VMA_NULL;
    15558  VkResult res = Map(hAllocation, &pData);
    15559  if(res == VK_SUCCESS)
    15560  {
    15561  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
    15562  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
    15563  Unmap(hAllocation);
    15564  }
    15565  else
    15566  {
    15567  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
    15568  }
    15569  }
    15570 }
    15571 
    15572 #if VMA_STATS_STRING_ENABLED
    15573 
    15574 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
    15575 {
    15576  bool dedicatedAllocationsStarted = false;
    15577  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15578  {
    15579  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
    15580  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
    15581  VMA_ASSERT(pDedicatedAllocVector);
    15582  if(pDedicatedAllocVector->empty() == false)
    15583  {
    15584  if(dedicatedAllocationsStarted == false)
    15585  {
    15586  dedicatedAllocationsStarted = true;
    15587  json.WriteString("DedicatedAllocations");
    15588  json.BeginObject();
    15589  }
    15590 
    15591  json.BeginString("Type ");
    15592  json.ContinueString(memTypeIndex);
    15593  json.EndString();
    15594 
    15595  json.BeginArray();
    15596 
    15597  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
    15598  {
    15599  json.BeginObject(true);
    15600  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
    15601  hAlloc->PrintParameters(json);
    15602  json.EndObject();
    15603  }
    15604 
    15605  json.EndArray();
    15606  }
    15607  }
    15608  if(dedicatedAllocationsStarted)
    15609  {
    15610  json.EndObject();
    15611  }
    15612 
    15613  {
    15614  bool allocationsStarted = false;
    15615  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
    15616  {
    15617  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
    15618  {
    15619  if(allocationsStarted == false)
    15620  {
    15621  allocationsStarted = true;
    15622  json.WriteString("DefaultPools");
    15623  json.BeginObject();
    15624  }
    15625 
    15626  json.BeginString("Type ");
    15627  json.ContinueString(memTypeIndex);
    15628  json.EndString();
    15629 
    15630  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
    15631  }
    15632  }
    15633  if(allocationsStarted)
    15634  {
    15635  json.EndObject();
    15636  }
    15637  }
    15638 
    15639  // Custom pools
    15640  {
    15641  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
    15642  const size_t poolCount = m_Pools.size();
    15643  if(poolCount > 0)
    15644  {
    15645  json.WriteString("Pools");
    15646  json.BeginObject();
    15647  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
    15648  {
    15649  json.BeginString();
    15650  json.ContinueString(m_Pools[poolIndex]->GetId());
    15651  json.EndString();
    15652 
    15653  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
    15654  }
    15655  json.EndObject();
    15656  }
    15657  }
    15658 }
    15659 
    15660 #endif // #if VMA_STATS_STRING_ENABLED
    15661 
    15662 //////////////////////////////////////////////////////////////////////////////////////////
    15663 // Public interface
    15664 
    15665 VkResult vmaCreateAllocator(
    15666  const VmaAllocatorCreateInfo* pCreateInfo,
    15667  VmaAllocator* pAllocator)
    15668 {
    15669  VMA_ASSERT(pCreateInfo && pAllocator);
    15670  VMA_DEBUG_LOG("vmaCreateAllocator");
    15671  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
    15672  return (*pAllocator)->Init(pCreateInfo);
    15673 }
    15674 
    15675 void vmaDestroyAllocator(
    15676  VmaAllocator allocator)
    15677 {
    15678  if(allocator != VK_NULL_HANDLE)
    15679  {
    15680  VMA_DEBUG_LOG("vmaDestroyAllocator");
    15681  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
    15682  vma_delete(&allocationCallbacks, allocator);
    15683  }
    15684 }
    15685 
    15686 void vmaGetPhysicalDeviceProperties(
    15687  VmaAllocator allocator,
    15688  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
    15689 {
    15690  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
    15691  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
    15692 }
    15693 
    15694 void vmaGetMemoryProperties(
    15695  VmaAllocator allocator,
    15696  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
    15697 {
    15698  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
    15699  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
    15700 }
    15701 
    15702 void vmaGetMemoryTypeProperties(
    15703  VmaAllocator allocator,
    15704  uint32_t memoryTypeIndex,
    15705  VkMemoryPropertyFlags* pFlags)
    15706 {
    15707  VMA_ASSERT(allocator && pFlags);
    15708  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
    15709  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
    15710 }
    15711 
    15712 void vmaSetCurrentFrameIndex(
    15713  VmaAllocator allocator,
    15714  uint32_t frameIndex)
    15715 {
    15716  VMA_ASSERT(allocator);
    15717  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
    15718 
    15719  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15720 
    15721  allocator->SetCurrentFrameIndex(frameIndex);
    15722 }
    15723 
    15724 void vmaCalculateStats(
    15725  VmaAllocator allocator,
    15726  VmaStats* pStats)
    15727 {
    15728  VMA_ASSERT(allocator && pStats);
    15729  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15730  allocator->CalculateStats(pStats);
    15731 }
    15732 
    15733 #if VMA_STATS_STRING_ENABLED
    15734 
    15735 void vmaBuildStatsString(
    15736  VmaAllocator allocator,
    15737  char** ppStatsString,
    15738  VkBool32 detailedMap)
    15739 {
    15740  VMA_ASSERT(allocator && ppStatsString);
    15741  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    15742 
    15743  VmaStringBuilder sb(allocator);
    15744  {
    15745  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    15746  json.BeginObject();
    15747 
    15748  VmaStats stats;
    15749  allocator->CalculateStats(&stats);
    15750 
    15751  json.WriteString("Total");
    15752  VmaPrintStatInfo(json, stats.total);
    15753 
    15754  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
    15755  {
    15756  json.BeginString("Heap ");
    15757  json.ContinueString(heapIndex);
    15758  json.EndString();
    15759  json.BeginObject();
    15760 
    15761  json.WriteString("Size");
    15762  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
    15763 
    15764  json.WriteString("Flags");
    15765  json.BeginArray(true);
    15766  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
    15767  {
    15768  json.WriteString("DEVICE_LOCAL");
    15769  }
    15770  json.EndArray();
    15771 
    15772  if(stats.memoryHeap[heapIndex].blockCount > 0)
    15773  {
    15774  json.WriteString("Stats");
    15775  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
    15776  }
    15777 
    15778  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
    15779  {
    15780  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
    15781  {
    15782  json.BeginString("Type ");
    15783  json.ContinueString(typeIndex);
    15784  json.EndString();
    15785 
    15786  json.BeginObject();
    15787 
    15788  json.WriteString("Flags");
    15789  json.BeginArray(true);
    15790  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
    15791  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
    15792  {
    15793  json.WriteString("DEVICE_LOCAL");
    15794  }
    15795  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    15796  {
    15797  json.WriteString("HOST_VISIBLE");
    15798  }
    15799  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
    15800  {
    15801  json.WriteString("HOST_COHERENT");
    15802  }
    15803  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
    15804  {
    15805  json.WriteString("HOST_CACHED");
    15806  }
    15807  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
    15808  {
    15809  json.WriteString("LAZILY_ALLOCATED");
    15810  }
    15811  json.EndArray();
    15812 
    15813  if(stats.memoryType[typeIndex].blockCount > 0)
    15814  {
    15815  json.WriteString("Stats");
    15816  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
    15817  }
    15818 
    15819  json.EndObject();
    15820  }
    15821  }
    15822 
    15823  json.EndObject();
    15824  }
    15825  if(detailedMap == VK_TRUE)
    15826  {
    15827  allocator->PrintDetailedMap(json);
    15828  }
    15829 
    15830  json.EndObject();
    15831  }
    15832 
    15833  const size_t len = sb.GetLength();
    15834  char* const pChars = vma_new_array(allocator, char, len + 1);
    15835  if(len > 0)
    15836  {
    15837  memcpy(pChars, sb.GetData(), len);
    15838  }
    15839  pChars[len] = '\0';
    15840  *ppStatsString = pChars;
    15841 }
    15842 
    15843 void vmaFreeStatsString(
    15844  VmaAllocator allocator,
    15845  char* pStatsString)
    15846 {
    15847  if(pStatsString != VMA_NULL)
    15848  {
    15849  VMA_ASSERT(allocator);
    15850  size_t len = strlen(pStatsString);
    15851  vma_delete_array(allocator, pStatsString, len + 1);
    15852  }
    15853 }
    15854 
    15855 #endif // #if VMA_STATS_STRING_ENABLED
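/* Usage sketch (editorial addition): dumping the JSON statistics produced by
the writer above. Requires building with VMA_STATS_STRING_ENABLED. The string
must be released with vmaFreeStatsString, using the same allocator:

    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/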
    15856 
    15857 /*
    15858 This function is not protected by any mutex because it just reads immutable data.
    15859 */
    15860 VkResult vmaFindMemoryTypeIndex(
    15861  VmaAllocator allocator,
    15862  uint32_t memoryTypeBits,
    15863  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15864  uint32_t* pMemoryTypeIndex)
    15865 {
    15866  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15867  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15868  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15869 
    15870  if(pAllocationCreateInfo->memoryTypeBits != 0)
    15871  {
    15872  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
    15873  }
    15874 
    15875  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
    15876  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
    15877 
    15878  const bool mapped = (pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
    15879  if(mapped)
    15880  {
    15881  preferredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15882  }
    15883 
    15884  // Convert usage to requiredFlags and preferredFlags.
    15885  switch(pAllocationCreateInfo->usage)
    15886  {
    15887  case VMA_MEMORY_USAGE_UNKNOWN:
    15888  break;
    15889  case VMA_MEMORY_USAGE_GPU_ONLY:
    15890  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15891  {
    15892  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15893  }
    15894  break;
    15895  case VMA_MEMORY_USAGE_CPU_ONLY:
    15896  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    15897  break;
    15898  case VMA_MEMORY_USAGE_CPU_TO_GPU:
    15899  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15900  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
    15901  {
    15902  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    15903  }
    15904  break;
    15905  case VMA_MEMORY_USAGE_GPU_TO_CPU:
    15906  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    15907  preferredFlags |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    15908  break;
    15909  default:
    15910  break;
    15911  }
    15912 
    15913  *pMemoryTypeIndex = UINT32_MAX;
    15914  uint32_t minCost = UINT32_MAX;
    15915  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
    15916  memTypeIndex < allocator->GetMemoryTypeCount();
    15917  ++memTypeIndex, memTypeBit <<= 1)
    15918  {
    15919  // This memory type is acceptable according to memoryTypeBits bitmask.
    15920  if((memTypeBit & memoryTypeBits) != 0)
    15921  {
    15922  const VkMemoryPropertyFlags currFlags =
    15923  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
    15924  // This memory type contains requiredFlags.
    15925  if((requiredFlags & ~currFlags) == 0)
    15926  {
    15927  // Calculate cost as number of bits from preferredFlags not present in this memory type.
    15928  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags);
    15929  // Remember memory type with lowest cost.
    15930  if(currCost < minCost)
    15931  {
    15932  *pMemoryTypeIndex = memTypeIndex;
    15933  if(currCost == 0)
    15934  {
    15935  return VK_SUCCESS;
    15936  }
    15937  minCost = currCost;
    15938  }
    15939  }
    15940  }
    15941  }
    15942  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
    15943 }
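/* Usage sketch (editorial addition): querying the best memory type for a
host-visible, preferably cached readback allocation. memoryTypeBits would
usually come from vkGetBufferMemoryRequirements (memReq assumed filled in):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
    uint32_t memTypeIndex = UINT32_MAX;
    VkResult res = vmaFindMemoryTypeIndex(
        allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
*/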
    15944 
    15945 VkResult vmaFindMemoryTypeIndexForBufferInfo(
    15946  VmaAllocator allocator,
    15947  const VkBufferCreateInfo* pBufferCreateInfo,
    15948  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15949  uint32_t* pMemoryTypeIndex)
    15950 {
    15951  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15952  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
    15953  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15954  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15955 
    15956  const VkDevice hDev = allocator->m_hDevice;
    15957  VkBuffer hBuffer = VK_NULL_HANDLE;
    15958  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
    15959  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
    15960  if(res == VK_SUCCESS)
    15961  {
    15962  VkMemoryRequirements memReq = {};
    15963  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
    15964  hDev, hBuffer, &memReq);
    15965 
    15966  res = vmaFindMemoryTypeIndex(
    15967  allocator,
    15968  memReq.memoryTypeBits,
    15969  pAllocationCreateInfo,
    15970  pMemoryTypeIndex);
    15971 
    15972  allocator->GetVulkanFunctions().vkDestroyBuffer(
    15973  hDev, hBuffer, allocator->GetAllocationCallbacks());
    15974  }
    15975  return res;
    15976 }
    15977 
    15978 VkResult vmaFindMemoryTypeIndexForImageInfo(
    15979  VmaAllocator allocator,
    15980  const VkImageCreateInfo* pImageCreateInfo,
    15981  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    15982  uint32_t* pMemoryTypeIndex)
    15983 {
    15984  VMA_ASSERT(allocator != VK_NULL_HANDLE);
    15985  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
    15986  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
    15987  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
    15988 
    15989  const VkDevice hDev = allocator->m_hDevice;
    15990  VkImage hImage = VK_NULL_HANDLE;
    15991  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
    15992  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
    15993  if(res == VK_SUCCESS)
    15994  {
    15995  VkMemoryRequirements memReq = {};
    15996  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
    15997  hDev, hImage, &memReq);
    15998 
    15999  res = vmaFindMemoryTypeIndex(
    16000  allocator,
    16001  memReq.memoryTypeBits,
    16002  pAllocationCreateInfo,
    16003  pMemoryTypeIndex);
    16004 
    16005  allocator->GetVulkanFunctions().vkDestroyImage(
    16006  hDev, hImage, allocator->GetAllocationCallbacks());
    16007  }
    16008  return res;
    16009 }
    16010 
    16011 VkResult vmaCreatePool(
    16012  VmaAllocator allocator,
    16013  const VmaPoolCreateInfo* pCreateInfo,
    16014  VmaPool* pPool)
    16015 {
    16016  VMA_ASSERT(allocator && pCreateInfo && pPool);
    16017 
    16018  VMA_DEBUG_LOG("vmaCreatePool");
    16019 
    16020  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16021 
    16022  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
    16023 
    16024 #if VMA_RECORDING_ENABLED
    16025  if(allocator->GetRecorder() != VMA_NULL)
    16026  {
    16027  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
    16028  }
    16029 #endif
    16030 
    16031  return res;
    16032 }
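/* Usage sketch (editorial addition): creating a custom pool for a particular
memory type, typically found beforehand with vmaFindMemoryTypeIndex or
vmaFindMemoryTypeIndexForBufferInfo above:

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;  // from vmaFindMemoryTypeIndex*
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // optional; 0 = default
    poolCreateInfo.maxBlockCount = 4;               // optional; 0 = unlimited
    VmaPool pool = VK_NULL_HANDLE;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate from it via VmaAllocationCreateInfo::pool ...
    vmaDestroyPool(allocator, pool);
*/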
    16033 
    16034 void vmaDestroyPool(
    16035  VmaAllocator allocator,
    16036  VmaPool pool)
    16037 {
    16038  VMA_ASSERT(allocator);
    16039 
    16040  if(pool == VK_NULL_HANDLE)
    16041  {
    16042  return;
    16043  }
    16044 
    16045  VMA_DEBUG_LOG("vmaDestroyPool");
    16046 
    16047  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16048 
    16049 #if VMA_RECORDING_ENABLED
    16050  if(allocator->GetRecorder() != VMA_NULL)
    16051  {
    16052  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
    16053  }
    16054 #endif
    16055 
    16056  allocator->DestroyPool(pool);
    16057 }
    16058 
    16059 void vmaGetPoolStats(
    16060  VmaAllocator allocator,
    16061  VmaPool pool,
    16062  VmaPoolStats* pPoolStats)
    16063 {
    16064  VMA_ASSERT(allocator && pool && pPoolStats);
    16065 
    16066  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16067 
    16068  allocator->GetPoolStats(pool, pPoolStats);
    16069 }
    16070 
    16071 void vmaMakePoolAllocationsLost(
    16072  VmaAllocator allocator,
    16073  VmaPool pool,
    16074  size_t* pLostAllocationCount)
    16075 {
    16076  VMA_ASSERT(allocator && pool);
    16077 
    16078  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16079 
    16080 #if VMA_RECORDING_ENABLED
    16081  if(allocator->GetRecorder() != VMA_NULL)
    16082  {
    16083  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
    16084  }
    16085 #endif
    16086 
    16087  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
    16088 }
    16089 
    16090 VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
    16091 {
    16092  VMA_ASSERT(allocator && pool);
    16093 
    16094  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16095 
    16096  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
    16097 
    16098  return allocator->CheckPoolCorruption(pool);
    16099 }
    16100 
    16101 VkResult vmaAllocateMemory(
    16102  VmaAllocator allocator,
    16103  const VkMemoryRequirements* pVkMemoryRequirements,
    16104  const VmaAllocationCreateInfo* pCreateInfo,
    16105  VmaAllocation* pAllocation,
    16106  VmaAllocationInfo* pAllocationInfo)
    16107 {
    16108  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
    16109 
    16110  VMA_DEBUG_LOG("vmaAllocateMemory");
    16111 
    16112  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16113 
    16114  VkResult result = allocator->AllocateMemory(
    16115  *pVkMemoryRequirements,
    16116  false, // requiresDedicatedAllocation
    16117  false, // prefersDedicatedAllocation
    16118  VK_NULL_HANDLE, // dedicatedBuffer
    16119  VK_NULL_HANDLE, // dedicatedImage
    16120  *pCreateInfo,
    16121  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16122  1, // allocationCount
    16123  pAllocation);
    16124 
    16125 #if VMA_RECORDING_ENABLED
    16126  if(allocator->GetRecorder() != VMA_NULL)
    16127  {
    16128  allocator->GetRecorder()->RecordAllocateMemory(
    16129  allocator->GetCurrentFrameIndex(),
    16130  *pVkMemoryRequirements,
    16131  *pCreateInfo,
    16132  *pAllocation);
    16133  }
    16134 #endif
    16135 
    16136  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16137  {
    16138  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16139  }
    16140 
    16141  return result;
    16142 }
    16143 
    16144 VkResult vmaAllocateMemoryPages(
    16145  VmaAllocator allocator,
    16146  const VkMemoryRequirements* pVkMemoryRequirements,
    16147  const VmaAllocationCreateInfo* pCreateInfo,
    16148  size_t allocationCount,
    16149  VmaAllocation* pAllocations,
    16150  VmaAllocationInfo* pAllocationInfo)
    16151 {
    16152  if(allocationCount == 0)
    16153  {
    16154  return VK_SUCCESS;
    16155  }
    16156 
    16157  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
    16158 
    16159  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
    16160 
    16161  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16162 
    16163  VkResult result = allocator->AllocateMemory(
    16164  *pVkMemoryRequirements,
    16165  false, // requiresDedicatedAllocation
    16166  false, // prefersDedicatedAllocation
    16167  VK_NULL_HANDLE, // dedicatedBuffer
    16168  VK_NULL_HANDLE, // dedicatedImage
    16169  *pCreateInfo,
    16170  VMA_SUBALLOCATION_TYPE_UNKNOWN,
    16171  allocationCount,
    16172  pAllocations);
    16173 
    16174 #if VMA_RECORDING_ENABLED
    16175  if(allocator->GetRecorder() != VMA_NULL)
    16176  {
    16177  allocator->GetRecorder()->RecordAllocateMemoryPages(
    16178  allocator->GetCurrentFrameIndex(),
    16179  *pVkMemoryRequirements,
    16180  *pCreateInfo,
    16181  (uint64_t)allocationCount,
    16182  pAllocations);
    16183  }
    16184 #endif
    16185 
    16186  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
    16187  {
    16188  for(size_t i = 0; i < allocationCount; ++i)
    16189  {
    16190  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
    16191  }
    16192  }
    16193 
    16194  return result;
    16195 }
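/* Usage sketch (editorial addition): vmaAllocateMemoryPages creates many
allocations with identical requirements in one call; per the library's
documentation, if the call fails no allocations remain. allocCreateInfo and
memReq are assumed prepared as in the single-allocation case:

    VmaAllocation allocs[8] = {};
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &allocCreateInfo, 8, allocs, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        // ... use allocs ...
        vmaFreeMemoryPages(allocator, 8, allocs);
    }
*/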
    16196 
    16197 VkResult vmaAllocateMemoryForBuffer(
    16198  VmaAllocator allocator,
    16199  VkBuffer buffer,
    16200  const VmaAllocationCreateInfo* pCreateInfo,
    16201  VmaAllocation* pAllocation,
    16202  VmaAllocationInfo* pAllocationInfo)
    16203 {
    16204  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16205 
    16206  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
    16207 
    16208  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16209 
    16210  VkMemoryRequirements vkMemReq = {};
    16211  bool requiresDedicatedAllocation = false;
    16212  bool prefersDedicatedAllocation = false;
    16213  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
    16214  requiresDedicatedAllocation,
    16215  prefersDedicatedAllocation);
    16216 
    16217  VkResult result = allocator->AllocateMemory(
    16218  vkMemReq,
    16219  requiresDedicatedAllocation,
    16220  prefersDedicatedAllocation,
    16221  buffer, // dedicatedBuffer
    16222  VK_NULL_HANDLE, // dedicatedImage
    16223  *pCreateInfo,
    16224  VMA_SUBALLOCATION_TYPE_BUFFER,
    16225  1, // allocationCount
    16226  pAllocation);
    16227 
    16228 #if VMA_RECORDING_ENABLED
    16229  if(allocator->GetRecorder() != VMA_NULL)
    16230  {
    16231  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
    16232  allocator->GetCurrentFrameIndex(),
    16233  vkMemReq,
    16234  requiresDedicatedAllocation,
    16235  prefersDedicatedAllocation,
    16236  *pCreateInfo,
    16237  *pAllocation);
    16238  }
    16239 #endif
    16240 
    16241  if(pAllocationInfo && result == VK_SUCCESS)
    16242  {
    16243  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16244  }
    16245 
    16246  return result;
    16247 }
    16248 
    16249 VkResult vmaAllocateMemoryForImage(
    16250  VmaAllocator allocator,
    16251  VkImage image,
    16252  const VmaAllocationCreateInfo* pCreateInfo,
    16253  VmaAllocation* pAllocation,
    16254  VmaAllocationInfo* pAllocationInfo)
    16255 {
    16256  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
    16257 
    16258  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
    16259 
    16260  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16261 
    16262  VkMemoryRequirements vkMemReq = {};
    16263  bool requiresDedicatedAllocation = false;
    16264  bool prefersDedicatedAllocation = false;
    16265  allocator->GetImageMemoryRequirements(image, vkMemReq,
    16266  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16267 
    16268  VkResult result = allocator->AllocateMemory(
    16269  vkMemReq,
    16270  requiresDedicatedAllocation,
    16271  prefersDedicatedAllocation,
    16272  VK_NULL_HANDLE, // dedicatedBuffer
    16273  image, // dedicatedImage
    16274  *pCreateInfo,
    16275  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
    16276  1, // allocationCount
    16277  pAllocation);
    16278 
    16279 #if VMA_RECORDING_ENABLED
    16280  if(allocator->GetRecorder() != VMA_NULL)
    16281  {
    16282  allocator->GetRecorder()->RecordAllocateMemoryForImage(
    16283  allocator->GetCurrentFrameIndex(),
    16284  vkMemReq,
    16285  requiresDedicatedAllocation,
    16286  prefersDedicatedAllocation,
    16287  *pCreateInfo,
    16288  *pAllocation);
    16289  }
    16290 #endif
    16291 
    16292  if(pAllocationInfo && result == VK_SUCCESS)
    16293  {
    16294  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16295  }
    16296 
    16297  return result;
    16298 }
    16299 
    16300 void vmaFreeMemory(
    16301  VmaAllocator allocator,
    16302  VmaAllocation allocation)
    16303 {
    16304  VMA_ASSERT(allocator);
    16305 
    16306  if(allocation == VK_NULL_HANDLE)
    16307  {
    16308  return;
    16309  }
    16310 
    16311  VMA_DEBUG_LOG("vmaFreeMemory");
    16312 
    16313  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16314 
    16315 #if VMA_RECORDING_ENABLED
    16316  if(allocator->GetRecorder() != VMA_NULL)
    16317  {
    16318  allocator->GetRecorder()->RecordFreeMemory(
    16319  allocator->GetCurrentFrameIndex(),
    16320  allocation);
    16321  }
    16322 #endif
    16323 
    16324  allocator->FreeMemory(
    16325  1, // allocationCount
    16326  &allocation);
    16327 }
    16328 
    16329 void vmaFreeMemoryPages(
    16330  VmaAllocator allocator,
    16331  size_t allocationCount,
    16332  VmaAllocation* pAllocations)
    16333 {
    16334  if(allocationCount == 0)
    16335  {
    16336  return;
    16337  }
    16338 
    16339  VMA_ASSERT(allocator);
    16340 
    16341  VMA_DEBUG_LOG("vmaFreeMemoryPages");
    16342 
    16343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16344 
    16345 #if VMA_RECORDING_ENABLED
    16346  if(allocator->GetRecorder() != VMA_NULL)
    16347  {
    16348  allocator->GetRecorder()->RecordFreeMemoryPages(
    16349  allocator->GetCurrentFrameIndex(),
    16350  (uint64_t)allocationCount,
    16351  pAllocations);
    16352  }
    16353 #endif
    16354 
    16355  allocator->FreeMemory(allocationCount, pAllocations);
    16356 }
    16357 
    16358 VkResult vmaResizeAllocation(
    16359  VmaAllocator allocator,
    16360  VmaAllocation allocation,
    16361  VkDeviceSize newSize)
    16362 {
    16363  VMA_ASSERT(allocator && allocation);
    16364 
    16365  VMA_DEBUG_LOG("vmaResizeAllocation");
    16366 
    16367  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16368 
    16369 #if VMA_RECORDING_ENABLED
    16370  if(allocator->GetRecorder() != VMA_NULL)
    16371  {
    16372  allocator->GetRecorder()->RecordResizeAllocation(
    16373  allocator->GetCurrentFrameIndex(),
    16374  allocation,
    16375  newSize);
    16376  }
    16377 #endif
    16378 
    16379  return allocator->ResizeAllocation(allocation, newSize);
    16380 }
    16381 
    16382 void vmaGetAllocationInfo(
    16383  VmaAllocator allocator,
    16384  VmaAllocation allocation,
    16385  VmaAllocationInfo* pAllocationInfo)
    16386 {
    16387  VMA_ASSERT(allocator && allocation && pAllocationInfo);
    16388 
    16389  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16390 
    16391 #if VMA_RECORDING_ENABLED
    16392  if(allocator->GetRecorder() != VMA_NULL)
    16393  {
    16394  allocator->GetRecorder()->RecordGetAllocationInfo(
    16395  allocator->GetCurrentFrameIndex(),
    16396  allocation);
    16397  }
    16398 #endif
    16399 
    16400  allocator->GetAllocationInfo(allocation, pAllocationInfo);
    16401 }
    16402 
    16403 VkBool32 vmaTouchAllocation(
    16404  VmaAllocator allocator,
    16405  VmaAllocation allocation)
    16406 {
    16407  VMA_ASSERT(allocator && allocation);
    16408 
    16409  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16410 
    16411 #if VMA_RECORDING_ENABLED
    16412  if(allocator->GetRecorder() != VMA_NULL)
    16413  {
    16414  allocator->GetRecorder()->RecordTouchAllocation(
    16415  allocator->GetCurrentFrameIndex(),
    16416  allocation);
    16417  }
    16418 #endif
    16419 
    16420  return allocator->TouchAllocation(allocation);
    16421 }
    16422 
    16423 void vmaSetAllocationUserData(
    16424  VmaAllocator allocator,
    16425  VmaAllocation allocation,
    16426  void* pUserData)
    16427 {
    16428  VMA_ASSERT(allocator && allocation);
    16429 
    16430  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16431 
    16432  allocation->SetUserData(allocator, pUserData);
    16433 
    16434 #if VMA_RECORDING_ENABLED
    16435  if(allocator->GetRecorder() != VMA_NULL)
    16436  {
    16437  allocator->GetRecorder()->RecordSetAllocationUserData(
    16438  allocator->GetCurrentFrameIndex(),
    16439  allocation,
    16440  pUserData);
    16441  }
    16442 #endif
    16443 }
    16444 
    16445 void vmaCreateLostAllocation(
    16446  VmaAllocator allocator,
    16447  VmaAllocation* pAllocation)
    16448 {
    16449  VMA_ASSERT(allocator && pAllocation);
    16450 
    16451  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
    16452 
    16453  allocator->CreateLostAllocation(pAllocation);
    16454 
    16455 #if VMA_RECORDING_ENABLED
    16456  if(allocator->GetRecorder() != VMA_NULL)
    16457  {
    16458  allocator->GetRecorder()->RecordCreateLostAllocation(
    16459  allocator->GetCurrentFrameIndex(),
    16460  *pAllocation);
    16461  }
    16462 #endif
    16463 }
    16464 
    16465 VkResult vmaMapMemory(
    16466  VmaAllocator allocator,
    16467  VmaAllocation allocation,
    16468  void** ppData)
    16469 {
    16470  VMA_ASSERT(allocator && allocation && ppData);
    16471 
    16472  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16473 
    16474  VkResult res = allocator->Map(allocation, ppData);
    16475 
    16476 #if VMA_RECORDING_ENABLED
    16477  if(allocator->GetRecorder() != VMA_NULL)
    16478  {
    16479  allocator->GetRecorder()->RecordMapMemory(
    16480  allocator->GetCurrentFrameIndex(),
    16481  allocation);
    16482  }
    16483 #endif
    16484 
    16485  return res;
    16486 }
    16487 
    16488 void vmaUnmapMemory(
    16489  VmaAllocator allocator,
    16490  VmaAllocation allocation)
    16491 {
    16492  VMA_ASSERT(allocator && allocation);
    16493 
    16494  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16495 
    16496 #if VMA_RECORDING_ENABLED
    16497  if(allocator->GetRecorder() != VMA_NULL)
    16498  {
    16499  allocator->GetRecorder()->RecordUnmapMemory(
    16500  allocator->GetCurrentFrameIndex(),
    16501  allocation);
    16502  }
    16503 #endif
    16504 
    16505  allocator->Unmap(allocation);
    16506 }
    16507 
    16508 void vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16509 {
    16510  VMA_ASSERT(allocator && allocation);
    16511 
    16512  VMA_DEBUG_LOG("vmaFlushAllocation");
    16513 
    16514  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16515 
    16516  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
    16517 
    16518 #if VMA_RECORDING_ENABLED
    16519  if(allocator->GetRecorder() != VMA_NULL)
    16520  {
    16521  allocator->GetRecorder()->RecordFlushAllocation(
    16522  allocator->GetCurrentFrameIndex(),
    16523  allocation, offset, size);
    16524  }
    16525 #endif
    16526 }
    16527 
    16528 void vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
    16529 {
    16530  VMA_ASSERT(allocator && allocation);
    16531 
    16532  VMA_DEBUG_LOG("vmaInvalidateAllocation");
    16533 
    16534  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16535 
    16536  allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
    16537 
    16538 #if VMA_RECORDING_ENABLED
    16539  if(allocator->GetRecorder() != VMA_NULL)
    16540  {
    16541  allocator->GetRecorder()->RecordInvalidateAllocation(
    16542  allocator->GetCurrentFrameIndex(),
    16543  allocation, offset, size);
    16544  }
    16545 #endif
    16546 }
    16547 
    16548 VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
    16549 {
    16550  VMA_ASSERT(allocator);
    16551 
    16552  VMA_DEBUG_LOG("vmaCheckCorruption");
    16553 
    16554  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16555 
    16556  return allocator->CheckCorruption(memoryTypeBits);
    16557 }
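/* Usage sketch (editorial addition): corruption checking validates the magic
values written around allocations; it requires building with VMA_DEBUG_MARGIN
and VMA_DEBUG_DETECT_CORRUPTION enabled, otherwise it reports
VK_ERROR_FEATURE_NOT_PRESENT, as seen in CheckCorruption above:

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
    VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
*/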
    16558 
    16559 VkResult vmaDefragment(
    16560  VmaAllocator allocator,
    16561  VmaAllocation* pAllocations,
    16562  size_t allocationCount,
    16563  VkBool32* pAllocationsChanged,
    16564  const VmaDefragmentationInfo *pDefragmentationInfo,
    16565  VmaDefragmentationStats* pDefragmentationStats)
    16566 {
    16567  // Deprecated interface, reimplemented using the new one.
    16568 
    16569  VmaDefragmentationInfo2 info2 = {};
    16570  info2.allocationCount = (uint32_t)allocationCount;
    16571  info2.pAllocations = pAllocations;
    16572  info2.pAllocationsChanged = pAllocationsChanged;
    16573  if(pDefragmentationInfo != VMA_NULL)
    16574  {
    16575  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
    16576  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
    16577  }
    16578  else
    16579  {
    16580  info2.maxCpuAllocationsToMove = UINT32_MAX;
    16581  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
    16582  }
    16583  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
    16584 
    16585  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    16586  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
    16587  if(res == VK_NOT_READY)
    16588  {
    16589  res = vmaDefragmentationEnd(allocator, ctx);
    16590  }
    16591  return res;
    16592 }
    16593 
    16594 VkResult vmaDefragmentationBegin(
    16595  VmaAllocator allocator,
    16596  const VmaDefragmentationInfo2* pInfo,
    16597  VmaDefragmentationStats* pStats,
    16598  VmaDefragmentationContext *pContext)
    16599 {
    16600  VMA_ASSERT(allocator && pInfo && pContext);
    16601 
    16602  // Degenerate case: Nothing to defragment.
    16603  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
    16604  {
    16605  return VK_SUCCESS;
    16606  }
    16607 
    16608  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
    16609  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
    16610  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
    16611  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
    16612 
    16613  VMA_DEBUG_LOG("vmaDefragmentationBegin");
    16614 
    16615  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16616 
    16617  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
    16618 
    16619 #if VMA_RECORDING_ENABLED
    16620  if(allocator->GetRecorder() != VMA_NULL)
    16621  {
    16622  allocator->GetRecorder()->RecordDefragmentationBegin(
    16623  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
    16624  }
    16625 #endif
    16626 
    16627  return res;
    16628 }
    16629 
    16630 VkResult vmaDefragmentationEnd(
    16631  VmaAllocator allocator,
    16632  VmaDefragmentationContext context)
    16633 {
    16634  VMA_ASSERT(allocator);
    16635 
    16636  VMA_DEBUG_LOG("vmaDefragmentationEnd");
    16637 
    16638  if(context != VK_NULL_HANDLE)
    16639  {
    16640  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16641 
    16642 #if VMA_RECORDING_ENABLED
    16643  if(allocator->GetRecorder() != VMA_NULL)
    16644  {
    16645  allocator->GetRecorder()->RecordDefragmentationEnd(
    16646  allocator->GetCurrentFrameIndex(), context);
    16647  }
    16648 #endif
    16649 
    16650  return allocator->DefragmentationEnd(context);
    16651  }
    16652  else
    16653  {
    16654  return VK_SUCCESS;
    16655  }
    16656 }
    16657 
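A sketch of the non-deprecated workflow that vmaDefragment above maps onto, restricted to CPU-side moves (allocs/allocCount are hypothetical; pStats may be null, since only pInfo and pContext are asserted):

// Illustrative sketch: begin/end a CPU-only defragmentation pass.
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
if(res == VK_NOT_READY)
{
    // No command buffer was supplied, so there is no GPU work to wait for.
    res = vmaDefragmentationEnd(allocator, defragCtx);
}

Buffers and images bound to allocations that were moved must afterwards be recreated or rebound by the caller.
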
    16658 VkResult vmaBindBufferMemory(
    16659  VmaAllocator allocator,
    16660  VmaAllocation allocation,
    16661  VkBuffer buffer)
    16662 {
    16663  VMA_ASSERT(allocator && allocation && buffer);
    16664 
    16665  VMA_DEBUG_LOG("vmaBindBufferMemory");
    16666 
    16667  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16668 
    16669  return allocator->BindBufferMemory(allocation, buffer);
    16670 }
    16671 
    16672 VkResult vmaBindImageMemory(
    16673  VmaAllocator allocator,
    16674  VmaAllocation allocation,
    16675  VkImage image)
    16676 {
    16677  VMA_ASSERT(allocator && allocation && image);
    16678 
    16679  VMA_DEBUG_LOG("vmaBindImageMemory");
    16680 
    16681  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16682 
    16683  return allocator->BindImageMemory(allocation, image);
    16684 }
    16685 
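For the separate allocate-then-bind path, a fragment roughly mirroring the sequence vmaCreateBuffer performs internally; it assumes buf was already created with vkCreateBuffer:

// Illustrative sketch: allocate memory for an existing buffer, then bind it explicitly.
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, alloc, buf);
}
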
    16686 VkResult vmaCreateBuffer(
    16687  VmaAllocator allocator,
    16688  const VkBufferCreateInfo* pBufferCreateInfo,
    16689  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16690  VkBuffer* pBuffer,
    16691  VmaAllocation* pAllocation,
    16692  VmaAllocationInfo* pAllocationInfo)
    16693 {
    16694  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
    16695 
    16696  if(pBufferCreateInfo->size == 0)
    16697  {
    16698  return VK_ERROR_VALIDATION_FAILED_EXT;
    16699  }
    16700 
    16701  VMA_DEBUG_LOG("vmaCreateBuffer");
    16702 
    16703  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16704 
    16705  *pBuffer = VK_NULL_HANDLE;
    16706  *pAllocation = VK_NULL_HANDLE;
    16707 
    16708  // 1. Create VkBuffer.
    16709  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
    16710  allocator->m_hDevice,
    16711  pBufferCreateInfo,
    16712  allocator->GetAllocationCallbacks(),
    16713  pBuffer);
    16714  if(res >= 0)
    16715  {
    16716  // 2. vkGetBufferMemoryRequirements.
    16717  VkMemoryRequirements vkMemReq = {};
    16718  bool requiresDedicatedAllocation = false;
    16719  bool prefersDedicatedAllocation = false;
    16720  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
    16721  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16722 
    16723  // Make sure alignment requirements for specific buffer usages reported
    16724  // in Physical Device Properties are included in alignment reported by memory requirements.
    16725  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) != 0)
    16726  {
    16727  VMA_ASSERT(vkMemReq.alignment %
    16728  allocator->m_PhysicalDeviceProperties.limits.minTexelBufferOffsetAlignment == 0);
    16729  }
    16730  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) != 0)
    16731  {
    16732  VMA_ASSERT(vkMemReq.alignment %
    16733  allocator->m_PhysicalDeviceProperties.limits.minUniformBufferOffsetAlignment == 0);
    16734  }
    16735  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) != 0)
    16736  {
    16737  VMA_ASSERT(vkMemReq.alignment %
    16738  allocator->m_PhysicalDeviceProperties.limits.minStorageBufferOffsetAlignment == 0);
    16739  }
    16740 
    16741  // 3. Allocate memory using allocator.
    16742  res = allocator->AllocateMemory(
    16743  vkMemReq,
    16744  requiresDedicatedAllocation,
    16745  prefersDedicatedAllocation,
    16746  *pBuffer, // dedicatedBuffer
    16747  VK_NULL_HANDLE, // dedicatedImage
    16748  *pAllocationCreateInfo,
    16749  VMA_SUBALLOCATION_TYPE_BUFFER,
    16750  1, // allocationCount
    16751  pAllocation);
    16752 
    16753 #if VMA_RECORDING_ENABLED
    16754  if(allocator->GetRecorder() != VMA_NULL)
    16755  {
    16756  allocator->GetRecorder()->RecordCreateBuffer(
    16757  allocator->GetCurrentFrameIndex(),
    16758  *pBufferCreateInfo,
    16759  *pAllocationCreateInfo,
    16760  *pAllocation);
    16761  }
    16762 #endif
    16763 
    16764  if(res >= 0)
    16765  {
 16766  // 4. Bind buffer with memory.
    16767  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16768  {
    16769  res = allocator->BindBufferMemory(*pAllocation, *pBuffer);
    16770  }
    16771  if(res >= 0)
    16772  {
    16773  // All steps succeeded.
    16774  #if VMA_STATS_STRING_ENABLED
    16775  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
    16776  #endif
    16777  if(pAllocationInfo != VMA_NULL)
    16778  {
    16779  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16780  }
    16781 
    16782  return VK_SUCCESS;
    16783  }
    16784  allocator->FreeMemory(
    16785  1, // allocationCount
    16786  pAllocation);
    16787  *pAllocation = VK_NULL_HANDLE;
    16788  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16789  *pBuffer = VK_NULL_HANDLE;
    16790  return res;
    16791  }
    16792  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
    16793  *pBuffer = VK_NULL_HANDLE;
    16794  return res;
    16795  }
    16796  return res;
    16797 }
    16798 
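A minimal end-to-end sketch of the call above (sizes and usage bits are placeholders):

// Illustrative sketch: buffer and memory created, bound, and later destroyed together.
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);

// ...use the buffer...
vmaDestroyBuffer(allocator, buf, alloc); // safe even if both handles are VK_NULL_HANDLE
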
    16799 void vmaDestroyBuffer(
    16800  VmaAllocator allocator,
    16801  VkBuffer buffer,
    16802  VmaAllocation allocation)
    16803 {
    16804  VMA_ASSERT(allocator);
    16805 
    16806  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16807  {
    16808  return;
    16809  }
    16810 
    16811  VMA_DEBUG_LOG("vmaDestroyBuffer");
    16812 
    16813  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16814 
    16815 #if VMA_RECORDING_ENABLED
    16816  if(allocator->GetRecorder() != VMA_NULL)
    16817  {
    16818  allocator->GetRecorder()->RecordDestroyBuffer(
    16819  allocator->GetCurrentFrameIndex(),
    16820  allocation);
    16821  }
    16822 #endif
    16823 
    16824  if(buffer != VK_NULL_HANDLE)
    16825  {
    16826  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
    16827  }
    16828 
    16829  if(allocation != VK_NULL_HANDLE)
    16830  {
    16831  allocator->FreeMemory(
    16832  1, // allocationCount
    16833  &allocation);
    16834  }
    16835 }
    16836 
    16837 VkResult vmaCreateImage(
    16838  VmaAllocator allocator,
    16839  const VkImageCreateInfo* pImageCreateInfo,
    16840  const VmaAllocationCreateInfo* pAllocationCreateInfo,
    16841  VkImage* pImage,
    16842  VmaAllocation* pAllocation,
    16843  VmaAllocationInfo* pAllocationInfo)
    16844 {
    16845  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
    16846 
    16847  if(pImageCreateInfo->extent.width == 0 ||
    16848  pImageCreateInfo->extent.height == 0 ||
    16849  pImageCreateInfo->extent.depth == 0 ||
    16850  pImageCreateInfo->mipLevels == 0 ||
    16851  pImageCreateInfo->arrayLayers == 0)
    16852  {
    16853  return VK_ERROR_VALIDATION_FAILED_EXT;
    16854  }
    16855 
    16856  VMA_DEBUG_LOG("vmaCreateImage");
    16857 
    16858  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16859 
    16860  *pImage = VK_NULL_HANDLE;
    16861  *pAllocation = VK_NULL_HANDLE;
    16862 
    16863  // 1. Create VkImage.
    16864  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
    16865  allocator->m_hDevice,
    16866  pImageCreateInfo,
    16867  allocator->GetAllocationCallbacks(),
    16868  pImage);
    16869  if(res >= 0)
    16870  {
    16871  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
    16872  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
    16873  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
    16874 
    16875  // 2. Allocate memory using allocator.
    16876  VkMemoryRequirements vkMemReq = {};
    16877  bool requiresDedicatedAllocation = false;
    16878  bool prefersDedicatedAllocation = false;
    16879  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
    16880  requiresDedicatedAllocation, prefersDedicatedAllocation);
    16881 
    16882  res = allocator->AllocateMemory(
    16883  vkMemReq,
    16884  requiresDedicatedAllocation,
    16885  prefersDedicatedAllocation,
    16886  VK_NULL_HANDLE, // dedicatedBuffer
    16887  *pImage, // dedicatedImage
    16888  *pAllocationCreateInfo,
    16889  suballocType,
    16890  1, // allocationCount
    16891  pAllocation);
    16892 
    16893 #if VMA_RECORDING_ENABLED
    16894  if(allocator->GetRecorder() != VMA_NULL)
    16895  {
    16896  allocator->GetRecorder()->RecordCreateImage(
    16897  allocator->GetCurrentFrameIndex(),
    16898  *pImageCreateInfo,
    16899  *pAllocationCreateInfo,
    16900  *pAllocation);
    16901  }
    16902 #endif
    16903 
    16904  if(res >= 0)
    16905  {
    16906  // 3. Bind image with memory.
    16907  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
    16908  {
    16909  res = allocator->BindImageMemory(*pAllocation, *pImage);
    16910  }
    16911  if(res >= 0)
    16912  {
    16913  // All steps succeeded.
    16914  #if VMA_STATS_STRING_ENABLED
    16915  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
    16916  #endif
    16917  if(pAllocationInfo != VMA_NULL)
    16918  {
    16919  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
    16920  }
    16921 
    16922  return VK_SUCCESS;
    16923  }
    16924  allocator->FreeMemory(
    16925  1, // allocationCount
    16926  pAllocation);
    16927  *pAllocation = VK_NULL_HANDLE;
    16928  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16929  *pImage = VK_NULL_HANDLE;
    16930  return res;
    16931  }
    16932  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
    16933  *pImage = VK_NULL_HANDLE;
    16934  return res;
    16935  }
    16936  return res;
    16937 }
    16938 
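The image counterpart, with the zero-extent validation above in mind (all values are placeholders):

// Illustrative sketch: a sampled 2D image in GPU-only memory.
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent = { 1024, 1024, 1 }; // zero width/height/depth would fail the validation above
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // drives the suballocation type chosen above
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage img = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, VMA_NULL);
// vmaDestroyImage(allocator, img, alloc); when finished.
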
    16939 void vmaDestroyImage(
    16940  VmaAllocator allocator,
    16941  VkImage image,
    16942  VmaAllocation allocation)
    16943 {
    16944  VMA_ASSERT(allocator);
    16945 
    16946  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
    16947  {
    16948  return;
    16949  }
    16950 
    16951  VMA_DEBUG_LOG("vmaDestroyImage");
    16952 
    16953  VMA_DEBUG_GLOBAL_MUTEX_LOCK
    16954 
    16955 #if VMA_RECORDING_ENABLED
    16956  if(allocator->GetRecorder() != VMA_NULL)
    16957  {
    16958  allocator->GetRecorder()->RecordDestroyImage(
    16959  allocator->GetCurrentFrameIndex(),
    16960  allocation);
    16961  }
    16962 #endif
    16963 
    16964  if(image != VK_NULL_HANDLE)
    16965  {
    16966  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
    16967  }
    16968  if(allocation != VK_NULL_HANDLE)
    16969  {
    16970  allocator->FreeMemory(
    16971  1, // allocationCount
    16972  &allocation);
    16973  }
    16974 }
    16975 
    16976 #endif // #ifdef VMA_IMPLEMENTATION
(The remainder of this diff touches only the Doxygen tooltip definitions emitted at the bottom of vk__mem__alloc_8h_source.html. The unchanged tooltips duplicate one-line briefs from the reference documentation; every changed tooltip differs only in its "Definition: vk_mem_alloc.h:NNNN" line number, each shifted by +8 after lines were added earlier in vk_mem_alloc.h. The changed entries, old → new:

VmaDefragmentationFlagBits                            2797 → 2805
(unnamed tooltip entry)                               2798 → 2806
VkFlags VmaDefragmentationFlags                       2800 → 2808
VmaDefragmentationInfo2 (struct)                      2806 → 2814
VmaDefragmentationInfo2::flags                        2809 → 2817
VmaDefragmentationInfo2::allocationCount              2812 → 2820
VmaDefragmentationInfo2::pAllocations                 2821 → 2829
VmaDefragmentationInfo2::pAllocationsChanged          2827 → 2835
VmaDefragmentationInfo2::poolCount                    2830 → 2838
VmaDefragmentationInfo2::pPools                       2846 → 2854
VmaDefragmentationInfo2::maxCpuBytesToMove            2851 → 2859
VmaDefragmentationInfo2::maxCpuAllocationsToMove      2856 → 2864
VmaDefragmentationInfo2::maxGpuBytesToMove            2861 → 2869
VmaDefragmentationInfo2::maxGpuAllocationsToMove      2866 → 2874
VmaDefragmentationInfo2::commandBuffer                2875 → 2883
VmaDefragmentationInfo (deprecated struct)            2882 → 2890
VmaDefragmentationInfo::maxBytesToMove                2887 → 2895
VmaDefragmentationInfo::maxAllocationsToMove          2892 → 2900
VmaDefragmentationStats (struct)                      2896 → 2904
VmaDefragmentationStats::bytesMoved                   2898 → 2906
VmaDefragmentationStats::bytesFreed                   2900 → 2908
VmaDefragmentationStats::allocationsMoved             2902 → 2910
VmaDefragmentationStats::deviceMemoryBlocksFreed      2904 → 2912)