Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H
#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
    #define NOMINMAX // For windows.h
#endif

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
#if !defined(VMA_MEMORY_PRIORITY)
    #if VK_EXT_memory_priority
        #define VMA_MEMORY_PRIORITY 1
    #else
        #define VMA_MEMORY_PRIORITY 0
    #endif
#endif

// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
#if !defined(VMA_EXTERNAL_MEMORY)
    #if VK_KHR_external_memory
        #define VMA_EXTERNAL_MEMORY 1
    #else
        #define VMA_EXTERNAL_MEMORY 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the return type, appropriately. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif
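
/*
For example (a sketch, not part of the library): a project that compiles VMA
into its own DLL on Windows could define these macros before including this
header in the implementation translation unit:

\code
#define VMA_CALL_PRE __declspec(dllexport)
#define VMA_CALL_POST __cdecl
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
\endcode
*/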

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to if they are not null.
//
// The length may be one of
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
//   this means the number of memory heaps available in the device associated
//   with the VmaAllocator being dealt with.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers then we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

/// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);
/// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);

/// Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    /// Optional, can be null.
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;
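
/*
A minimal sketch of such callbacks used for logging. The function names are
hypothetical; any functions matching the PFN_vma*DeviceMemoryFunction
signatures above can be plugged into VmaDeviceMemoryCallbacks.

\code
static void VKAPI_PTR MyAllocateDeviceMemoryCallback(VmaAllocator allocator,
    uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
{
    printf("vkAllocateMemory: type %u, %llu bytes\n", memoryType, (unsigned long long)size);
}
static void VKAPI_PTR MyFreeDeviceMemoryCallback(VmaAllocator allocator,
    uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
{
    printf("vkFreeMemory: type %u, %llu bytes\n", memoryType, (unsigned long long)size);
}
\endcode
*/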

typedef VkFlags VmaAllocatorCreateFlags;

/** \brief Pointers to some Vulkan functions - a subset used by the library.

Used in VmaAllocatorCreateInfo::pVulkanFunctions.
*/
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;
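
/*
A sketch of importing Vulkan functions statically, assuming they are linked
into the program (the pVulkanFunctions member it is assigned to belongs to
VmaAllocatorCreateInfo below):

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
vulkanFunctions.vkFreeMemory = &vkFreeMemory;
// ... fill the remaining members the same way ...
allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
\endcode
*/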
/// Flags to be used in VmaRecordSettings::flags.
typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
typedef struct VmaRecordSettings
{
    /// Flags for recording. Use VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* VMA_NOT_NULL pFilePath;
} VmaRecordSettings;

/// Description of an allocator to be created.
typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;

    /// Vulkan physical device. It must be valid throughout whole lifetime of created allocator.
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;

    /// Vulkan device. It must be valid throughout whole lifetime of created allocator.
    VkDevice VMA_NOT_NULL device;

    /// Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB. Optional.
    VkDeviceSize preferredLargeHeapBlockSize;

    /// Custom CPU memory allocation callbacks. Optional.
    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;

    /// Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;

    /// Maximum number of additional frames that are in use at the same time as current frame.
    uint32_t frameInUseCount;

    /// Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;

    /// Pointers to Vulkan functions. Can be null.
    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;

    /// Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* VMA_NULLABLE pRecordSettings;

    /// Handle to Vulkan instance object.
    VkInstance VMA_NOT_NULL instance;

    /// Optional. The highest version of Vulkan that the application is designed to use.
    uint32_t vulkanApiVersion;
#if VMA_EXTERNAL_MEMORY
    /// Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
    const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
#endif // #if VMA_EXTERNAL_MEMORY
} VmaAllocatorCreateInfo;

/// Creates Allocator object.
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);

/// Destroys allocator object.
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator VMA_NULLABLE allocator);

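/*
Typical setup and teardown of the allocator (a sketch; instance, physicalDevice
and device are assumed to be valid handles already created by the application):

\code
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
allocatorInfo.instance = instance;
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... create and use resources ...
vmaDestroyAllocator(allocator);
\endcode
*/
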
/// Information of existing VmaAllocator object.
typedef struct VmaAllocatorInfo
{
    /// Handle to Vulkan instance object.
    VkInstance VMA_NOT_NULL instance;
    /// Handle to Vulkan physical device object.
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    /// Handle to Vulkan device object.
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

/// Returns information about existing VmaAllocator object - handles to Vulkan device etc.
VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t frameIndex);

/// Calculated statistics of memory usage in entire allocator.
typedef struct VmaStatInfo
{
    /// Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    /// Number of VmaAllocation allocation objects allocated.
    uint32_t allocationCount;
    /// Number of free ranges of memory between allocations.
    uint32_t unusedRangeCount;
    /// Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    /// Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

/// General statistics from current state of Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

/// Retrieves statistics from current state of the Allocator.
VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaStats* VMA_NOT_NULL pStats);

/// Statistics of current memory usage and available budget, in bytes, for specific memory heap.
typedef struct VmaBudget
{
    /// Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
    VkDeviceSize blockBytes;

    /// Sum size of all allocations created in particular heap, in bytes.
    VkDeviceSize allocationBytes;

    /// Estimated current memory usage of the program, in bytes.
    VkDeviceSize usage;

    /// Estimated amount of memory available to the program, in bytes.
    VkDeviceSize budget;
} VmaBudget;

/// Retrieves information about current memory budget for all memory heaps.
VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaBudget* VMA_NOT_NULL pBudget);

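/*
Example of polling the budget to throttle new allocations (a sketch; heapIndex
selection is application-specific). vmaGetBudget() fills one VmaBudget entry
per memory heap.

\code
VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
vmaGetBudget(allocator, budgets);
if(budgets[heapIndex].usage >= budgets[heapIndex].budget)
{
    // Over budget: avoid creating new resources in this heap, free something instead.
}
\endcode
*/
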
#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

/// Builds and returns statistics as string in JSON format.
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED
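
/*
Example usage (a sketch): dump the full allocator state to a JSON string.

\code
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
// ... write statsString to a log or file ...
vmaFreeStatsString(allocator, statsString);
\endcode
*/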

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    /// Intended usage of memory. Use VmaMemoryUsage enum.
    VmaMemoryUsage usage;
    /// Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    /// Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    /// Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    /// Pool that this allocation should be created in. Optional.
    VmaPool VMA_NULLABLE pool;
    void* VMA_NULLABLE pUserData;
    /// A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
    float priority;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
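
/*
Example (a sketch): find a memory type suitable for a host-visible staging
buffer, without creating the buffer itself.

\code
VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
stagingBufInfo.size = 65536;
stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

VmaAllocationCreateInfo stagingAllocInfo = {};
stagingAllocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;

uint32_t memTypeIndex = 0;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &stagingBufInfo, &stagingAllocInfo, &memTypeIndex);
\endcode
*/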

typedef VkFlags VmaPoolCreateFlags;

/// Describes parameter of created VmaPool.
typedef struct VmaPoolCreateInfo {
    /// Vulkan memory type index to allocate this pool from.
    uint32_t memoryTypeIndex;
    /// Use combination of VmaPoolCreateFlagBits.
    VmaPoolCreateFlags flags;
    /// Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    VkDeviceSize blockSize;
    /// Minimum number of blocks to be always allocated in this pool, even if they stay empty.
    size_t minBlockCount;
    /// Maximum number of blocks that can be allocated in this pool. Optional.
    size_t maxBlockCount;
    /// Maximum number of additional frames that are in use at the same time as current frame.
    uint32_t frameInUseCount;
    /// A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
    float priority;
    /// Additional pNext chain to be attached to VkMemoryAllocateInfo used for every allocation made by this pool. Optional.
    void* VMA_NULLABLE pMemoryAllocateNext;
} VmaPoolCreateInfo;

/// Describes parameter of existing VmaPool.
typedef struct VmaPoolStats {
    /// Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
    VkDeviceSize size;
    /// Total number of bytes in the pool not used by any VmaAllocation.
    VkDeviceSize unusedSize;
    /// Number of VmaAllocation objects created from this pool that were not destroyed or lost.
    size_t allocationCount;
    /// Number of continuous memory ranges in the pool not used by any VmaAllocation.
    size_t unusedRangeCount;
    /// Size of the largest continuous free memory region available for new allocation.
    VkDeviceSize unusedRangeSizeMax;
    /// Number of VkDeviceMemory blocks allocated for this pool.
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NULLABLE pool);

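/*
Example (a sketch): create a custom pool for a specific memory type, e.g. one
found with vmaFindMemoryTypeIndex*, and destroy it when no longer needed.

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 128ull * 1024 * 1024;

VmaPool pool;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);
\endcode
*/
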
VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    VmaPoolStats* VMA_NOT_NULL pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    size_t* VMA_NULLABLE pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE * VMA_NOT_NULL ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE pName);

VK_DEFINE_HANDLE(VmaAllocation)

/// Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    /// Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    /// Handle to Vulkan memory object.
    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
    /// Offset in VkDeviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    /// Size of this allocation, in bytes.
    VkDeviceSize size;
    /// Pointer to the beginning of this allocation as mapped data.
    void* VMA_NULLABLE pMappedData;
    /// Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* VMA_NULLABLE pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
    size_t allocationCount,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    size_t allocationCount,
    const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE * VMA_NOT_NULL ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);
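
/*
Example (a sketch; srcData/srcDataSize are the application's data): write to a
host-visible allocation and flush it, in case its memory type is not
HOST_COHERENT.

\code
void* mappedData;
if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, srcDataSize);
    vmaUnmapMemory(allocator, allocation);
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
}
\endcode
*/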

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

/// Flags to be used in vmaDefragmentationBegin().
typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

/// Parameters for defragmentation. To be used with function vmaDefragmentationBegin().
typedef struct VmaDefragmentationInfo2 {
    /// Reserved for future use. Should be 0.
    VmaDefragmentationFlags flags;
    /// Number of allocations in pAllocations array.
    uint32_t allocationCount;
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
    /// Number of pools in pPools array.
    uint32_t poolCount;
    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
    /// Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side.
    VkDeviceSize maxCpuBytesToMove;
    /// Maximum number of allocations that can be moved to a different place using transfers on CPU side.
    uint32_t maxCpuAllocationsToMove;
    /// Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to commandBuffer.
    VkDeviceSize maxGpuBytesToMove;
    /// Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to commandBuffer.
    uint32_t maxGpuAllocationsToMove;
    /// Optional. Command buffer where GPU copy commands will be posted.
    VkCommandBuffer VMA_NULLABLE commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation VMA_NOT_NULL allocation;
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

/// Parameters for incremental defragmentation steps. To be used with function vmaBeginDefragmentationPass().
typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
} VmaDefragmentationPassInfo;

/// Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    /// Maximum total number of bytes that can be copied while moving allocations to different places.
    VkDeviceSize maxBytesToMove;
    /// Maximum number of allocations that can be moved to a different place.
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

/// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    /// Total number of bytes that have been copied while moving allocations to different places.
    VkDeviceSize bytesMoved;
    /// Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
    VkDeviceSize bytesFreed;
    /// Number of allocations that have been moved to different places.
    uint32_t allocationsMoved;
    /// Number of empty VkDeviceMemory objects that have been released to the system.
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
    VmaDefragmentationStats* VMA_NULLABLE pStats,
    VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context,
    VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context
);

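/*
A minimal sketch of CPU-side defragmentation using the begin/end pair (no
command buffer is provided, so only HOST_VISIBLE memory can be moved).
allocations/allocCount are assumed to be the application's movable
allocations. Buffers or images bound to moved allocations must be destroyed
before and recreated and re-bound afterwards.

\code
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocations;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
vmaDefragmentationEnd(allocator, defragCtx);
\endcode
*/
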
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    size_t allocationCount,
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
    const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
    VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
    VmaAllocation VMA_NULLABLE allocation);

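/*
The canonical usage (a sketch): create a buffer together with its memory in
one call. VMA_MEMORY_USAGE_GPU_ONLY is one of the VmaMemoryUsage values
elided from the enum listing above.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buffer, &allocation, nullptr);
// ...
vmaDestroyBuffer(allocator, buffer, allocation);
\endcode
*/
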
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
    VmaAllocation VMA_NULLABLE allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <utility>

#if VMA_RECORDING_ENABLED
    #include <chrono>
    #if defined(_WIN32)
        #include <windows.h>
    #else
        #include <sstream>
        #include <thread>
    #endif
#endif

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    #define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
*/
#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
    #if defined(VK_NO_PROTOTYPES)
        extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
        extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    #endif
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation of
the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
Library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif

static void* vma_aligned_alloc(size_t alignment, size_t size)
{
#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    // MAC_OS_X_VERSION_10_16), even though the function is marked
    // available for 10.15. That's why the preprocessor checks for 10.16 but
    // the __builtin_available checks for 10.15.
    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
    if (__builtin_available(macOS 10.15, iOS 13, *))
        return aligned_alloc(alignment, size);
#endif
#endif
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#elif defined(_WIN32)
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return _aligned_malloc(size, alignment);
}
#else
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return aligned_alloc(alignment, size);
}
#endif

#if defined(_WIN32)
static void vma_aligned_free(void* ptr)
{
    _aligned_free(ptr);
}
#else
static void vma_aligned_free(void* VMA_NULLABLE ptr)
{
    free(ptr);
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures, e.g. operator[].
// Making it non-empty can noticeably slow the program down.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
#endif

#ifndef VMA_SYSTEM_ALIGNED_FREE
    // VMA_SYSTEM_FREE is the old name, but it might have been defined by the user
    #if defined(VMA_SYSTEM_FREE)
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
    #else
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Every allocation will have its own memory block. Define to 1 for debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_MIN_ALIGNMENT
    // Minimum alignment of all allocations, in bytes. Set to more than 1 for debugging purposes only. Must be a power of two.
    #ifdef VMA_DEBUG_ALIGNMENT // Old name
        #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
    #else
        #define VMA_MIN_ALIGNMENT (1)
    #endif
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin before and after every allocation, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    // Define this macro to 1 to automatically fill new allocations and destroyed allocations with some bit pattern.
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    // Define this macro to 1 together with non-zero VMA_DEBUG_MARGIN to enable validation of contents of the margins.
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set this to 1 for debugging purposes only, to enable a single mutex protecting all entry calls to the library. Can be useful for debugging multithreading issues.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. Set to more than 1 for debugging purposes only. Must be a power of two.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
    /*
    Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
    and return an error instead of leaving it up to the Vulkan implementation what to do in such cases.
    */
    #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

/*
Returns true if given number is a power of two.
T must be unsigned integer number or signed integer but always nonnegative.
For 0 returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return (val + alignment - 1) & ~(alignment - 1);
}
// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return val & ~(alignment - 1);
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned iterator points to the found element if it is present in the collection,
or to the place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}
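
/*
Usage sketch: find the insertion point for a value in a sorted range.

    const int arr[] = { 1, 3, 5, 7 };
    const int* it = VmaBinaryFindFirstNotLess(
        arr, arr + 4, 5, [](int a, int b) { return a < b; });
    // it points at the element 5 (index 2); for key 4 it would point at the
    // same position, where 4 should be inserted to keep the range sorted.
*/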

/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}

// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    void* result = VMA_NULL;
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        result = (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
    return result;
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_ALIGNED_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}
4981 
4982 // STL-compatible allocator.
4983 template<typename T>
4984 class VmaStlAllocator
4985 {
4986 public:
4987  const VkAllocationCallbacks* const m_pCallbacks;
4988  typedef T value_type;
4989 
4990  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4991  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4992 
4993  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4994  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4995 
4996  template<typename U>
4997  bool operator==(const VmaStlAllocator<U>& rhs) const
4998  {
4999  return m_pCallbacks == rhs.m_pCallbacks;
5000  }
5001  template<typename U>
5002  bool operator!=(const VmaStlAllocator<U>& rhs) const
5003  {
5004  return m_pCallbacks != rhs.m_pCallbacks;
5005  }
5006 
5007  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
5008  VmaStlAllocator(const VmaStlAllocator&) = default;
5009 };
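/*
Example (an illustrative sketch with hypothetical names): VmaStlAllocator
adapts VkAllocationCallbacks to the standard allocator interface, so the same
callbacks can drive STL containers as well as the custom containers below
(assuming <vector> is included).

    void Demo(const VkAllocationCallbacks* myCallbacks)
    {
        VmaStlAllocator<int> alloc(myCallbacks);
        std::vector<int, VmaStlAllocator<int> > v(alloc);
        v.push_back(42); // Element storage goes through VmaMalloc/VmaFree.
    }
*/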
5010 
5011 #if VMA_USE_STL_VECTOR
5012 
5013 #define VmaVector std::vector
5014 
5015 template<typename T, typename allocatorT>
5016 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
5017 {
5018  vec.insert(vec.begin() + index, item);
5019 }
5020 
5021 template<typename T, typename allocatorT>
5022 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
5023 {
5024  vec.erase(vec.begin() + index);
5025 }
5026 
5027 #else // #if VMA_USE_STL_VECTOR
5028 
5029 /* A class with an interface compatible with a subset of std::vector.
5030 T must be POD, because constructors and destructors are not called, and
5031 memcpy is used to move these objects. */
5032 template<typename T, typename AllocatorT>
5033 class VmaVector
5034 {
5035 public:
5036  typedef T value_type;
5037 
5038  VmaVector(const AllocatorT& allocator) :
5039  m_Allocator(allocator),
5040  m_pArray(VMA_NULL),
5041  m_Count(0),
5042  m_Capacity(0)
5043  {
5044  }
5045 
5046  VmaVector(size_t count, const AllocatorT& allocator) :
5047  m_Allocator(allocator),
5048  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
5049  m_Count(count),
5050  m_Capacity(count)
5051  {
5052  }
5053 
5054  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
5055  // value is unused.
5056  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
5057  : VmaVector(count, allocator) {}
5058 
5059  VmaVector(const VmaVector<T, AllocatorT>& src) :
5060  m_Allocator(src.m_Allocator),
5061  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
5062  m_Count(src.m_Count),
5063  m_Capacity(src.m_Count)
5064  {
5065  if(m_Count != 0)
5066  {
5067  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
5068  }
5069  }
5070 
5071  ~VmaVector()
5072  {
5073  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5074  }
5075 
5076  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
5077  {
5078  if(&rhs != this)
5079  {
5080  resize(rhs.m_Count);
5081  if(m_Count != 0)
5082  {
5083  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
5084  }
5085  }
5086  return *this;
5087  }
5088 
5089  bool empty() const { return m_Count == 0; }
5090  size_t size() const { return m_Count; }
5091  T* data() { return m_pArray; }
5092  const T* data() const { return m_pArray; }
5093 
5094  T& operator[](size_t index)
5095  {
5096  VMA_HEAVY_ASSERT(index < m_Count);
5097  return m_pArray[index];
5098  }
5099  const T& operator[](size_t index) const
5100  {
5101  VMA_HEAVY_ASSERT(index < m_Count);
5102  return m_pArray[index];
5103  }
5104 
5105  T& front()
5106  {
5107  VMA_HEAVY_ASSERT(m_Count > 0);
5108  return m_pArray[0];
5109  }
5110  const T& front() const
5111  {
5112  VMA_HEAVY_ASSERT(m_Count > 0);
5113  return m_pArray[0];
5114  }
5115  T& back()
5116  {
5117  VMA_HEAVY_ASSERT(m_Count > 0);
5118  return m_pArray[m_Count - 1];
5119  }
5120  const T& back() const
5121  {
5122  VMA_HEAVY_ASSERT(m_Count > 0);
5123  return m_pArray[m_Count - 1];
5124  }
5125 
5126  void reserve(size_t newCapacity, bool freeMemory = false)
5127  {
5128  newCapacity = VMA_MAX(newCapacity, m_Count);
5129 
5130  if((newCapacity < m_Capacity) && !freeMemory)
5131  {
5132  newCapacity = m_Capacity;
5133  }
5134 
5135  if(newCapacity != m_Capacity)
5136  {
5137  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5138  if(m_Count != 0)
5139  {
5140  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5141  }
5142  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5143  m_Capacity = newCapacity;
5144  m_pArray = newArray;
5145  }
5146  }
5147 
5148  void resize(size_t newCount)
5149  {
5150  size_t newCapacity = m_Capacity;
5151  if(newCount > m_Capacity)
5152  {
5153  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
5154  }
5155 
5156  if(newCapacity != m_Capacity)
5157  {
5158  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5159  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
5160  if(elementsToCopy != 0)
5161  {
5162  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
5163  }
5164  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5165  m_Capacity = newCapacity;
5166  m_pArray = newArray;
5167  }
5168 
5169  m_Count = newCount;
5170  }
5171 
5172  void clear()
5173  {
5174  resize(0);
5175  }
5176 
5177  void shrink_to_fit()
5178  {
5179  if(m_Capacity > m_Count)
5180  {
5181  T* newArray = VMA_NULL;
5182  if(m_Count > 0)
5183  {
5184  newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
5185  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5186  }
5187  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5188  m_Capacity = m_Count;
5189  m_pArray = newArray;
5190  }
5191  }
5192 
5193  void insert(size_t index, const T& src)
5194  {
5195  VMA_HEAVY_ASSERT(index <= m_Count);
5196  const size_t oldCount = size();
5197  resize(oldCount + 1);
5198  if(index < oldCount)
5199  {
5200  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
5201  }
5202  m_pArray[index] = src;
5203  }
5204 
5205  void remove(size_t index)
5206  {
5207  VMA_HEAVY_ASSERT(index < m_Count);
5208  const size_t oldCount = size();
5209  if(index < oldCount - 1)
5210  {
5211  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
5212  }
5213  resize(oldCount - 1);
5214  }
5215 
5216  void push_back(const T& src)
5217  {
5218  const size_t newIndex = size();
5219  resize(newIndex + 1);
5220  m_pArray[newIndex] = src;
5221  }
5222 
5223  void pop_back()
5224  {
5225  VMA_HEAVY_ASSERT(m_Count > 0);
5226  resize(size() - 1);
5227  }
5228 
5229  void push_front(const T& src)
5230  {
5231  insert(0, src);
5232  }
5233 
5234  void pop_front()
5235  {
5236  VMA_HEAVY_ASSERT(m_Count > 0);
5237  remove(0);
5238  }
5239 
5240  typedef T* iterator;
5241  typedef const T* const_iterator;
5242 
5243  iterator begin() { return m_pArray; }
5244  iterator end() { return m_pArray + m_Count; }
5245  const_iterator cbegin() const { return m_pArray; }
5246  const_iterator cend() const { return m_pArray + m_Count; }
5247  const_iterator begin() const { return cbegin(); }
5248  const_iterator end() const { return cend(); }
5249 
5250 private:
5251  AllocatorT m_Allocator;
5252  T* m_pArray;
5253  size_t m_Count;
5254  size_t m_Capacity;
5255 };
5256 
5257 template<typename T, typename allocatorT>
5258 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
5259 {
5260  vec.insert(index, item);
5261 }
5262 
5263 template<typename T, typename allocatorT>
5264 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
5265 {
5266  vec.remove(index);
5267 }
5268 
5269 #endif // #if VMA_USE_STL_VECTOR
5270 
5271 template<typename CmpLess, typename VectorT>
5272 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
5273 {
5274  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5275  vector.data(),
5276  vector.data() + vector.size(),
5277  value,
5278  CmpLess()) - vector.data();
5279  VmaVectorInsert(vector, indexToInsert, value);
5280  return indexToInsert;
5281 }
5282 
5283 template<typename CmpLess, typename VectorT>
5284 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
5285 {
5286  CmpLess comparator;
5287  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
5288  vector.begin(),
5289  vector.end(),
5290  value,
5291  comparator);
5292  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5293  {
5294  size_t indexToRemove = it - vector.begin();
5295  VmaVectorRemove(vector, indexToRemove);
5296  return true;
5297  }
5298  return false;
5299 }
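/*
Example (an illustrative sketch): keeping a VmaVector sorted with a "less"
functor. IntLess is hypothetical; inside the library, comparators such as
VmaSuballocationOffsetLess (defined further below) are used the same way.

    struct IntLess { bool operator()(int lhs, int rhs) const { return lhs < rhs; } };

    void Demo(VmaVector<int, VmaStlAllocator<int> >& v)
    {
        VmaVectorInsertSorted<IntLess>(v, 7); // Binary search finds the spot, then inserts.
        VmaVectorInsertSorted<IntLess>(v, 3);
        bool removed = VmaVectorRemoveSorted<IntLess>(v, 7); // true - an equal element was found.
        (void)removed;
    }
*/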
5300 
5301 ////////////////////////////////////////////////////////////////////////////////
5302 // class VmaSmallVector
5303 
5304 /*
5305 This is a vector (a variable-sized array), optimized for the case when the array is small.
5306 
5307 It contains some number of elements in-place, which allows it to avoid heap allocation
5308 when the actual number of elements is below that threshold. This allows normal "small"
5309 cases to be fast without losing generality for large inputs.
5310 */
5311 
5312 template<typename T, typename AllocatorT, size_t N>
5313 class VmaSmallVector
5314 {
5315 public:
5316  typedef T value_type;
5317 
5318  VmaSmallVector(const AllocatorT& allocator) :
5319  m_Count(0),
5320  m_DynamicArray(allocator)
5321  {
5322  }
5323  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5324  m_Count(count),
5325  m_DynamicArray(count > N ? count : 0, allocator)
5326  {
5327  }
5328  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5329  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5330  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5331  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5332 
5333  bool empty() const { return m_Count == 0; }
5334  size_t size() const { return m_Count; }
5335  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5336  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5337 
5338  T& operator[](size_t index)
5339  {
5340  VMA_HEAVY_ASSERT(index < m_Count);
5341  return data()[index];
5342  }
5343  const T& operator[](size_t index) const
5344  {
5345  VMA_HEAVY_ASSERT(index < m_Count);
5346  return data()[index];
5347  }
5348 
5349  T& front()
5350  {
5351  VMA_HEAVY_ASSERT(m_Count > 0);
5352  return data()[0];
5353  }
5354  const T& front() const
5355  {
5356  VMA_HEAVY_ASSERT(m_Count > 0);
5357  return data()[0];
5358  }
5359  T& back()
5360  {
5361  VMA_HEAVY_ASSERT(m_Count > 0);
5362  return data()[m_Count - 1];
5363  }
5364  const T& back() const
5365  {
5366  VMA_HEAVY_ASSERT(m_Count > 0);
5367  return data()[m_Count - 1];
5368  }
5369 
5370  void resize(size_t newCount, bool freeMemory = false)
5371  {
5372  if(newCount > N && m_Count > N)
5373  {
5374  // Any direction, staying in m_DynamicArray
5375  m_DynamicArray.resize(newCount);
5376  if(freeMemory)
5377  {
5378  m_DynamicArray.shrink_to_fit();
5379  }
5380  }
5381  else if(newCount > N && m_Count <= N)
5382  {
5383  // Growing, moving from m_StaticArray to m_DynamicArray
5384  m_DynamicArray.resize(newCount);
5385  if(m_Count > 0)
5386  {
5387  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5388  }
5389  }
5390  else if(newCount <= N && m_Count > N)
5391  {
5392  // Shrinking, moving from m_DynamicArray to m_StaticArray
5393  if(newCount > 0)
5394  {
5395  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5396  }
5397  m_DynamicArray.resize(0);
5398  if(freeMemory)
5399  {
5400  m_DynamicArray.shrink_to_fit();
5401  }
5402  }
5403  else
5404  {
5405  // Any direction, staying in m_StaticArray - nothing to do here
5406  }
5407  m_Count = newCount;
5408  }
5409 
5410  void clear(bool freeMemory = false)
5411  {
5412  m_DynamicArray.clear();
5413  if(freeMemory)
5414  {
5415  m_DynamicArray.shrink_to_fit();
5416  }
5417  m_Count = 0;
5418  }
5419 
5420  void insert(size_t index, const T& src)
5421  {
5422  VMA_HEAVY_ASSERT(index <= m_Count);
5423  const size_t oldCount = size();
5424  resize(oldCount + 1);
5425  T* const dataPtr = data();
5426  if(index < oldCount)
5427  {
5428  // Note: This could be more optimal in the case where the memmove could instead be a memcpy directly from m_StaticArray to m_DynamicArray.
5429  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5430  }
5431  dataPtr[index] = src;
5432  }
5433 
5434  void remove(size_t index)
5435  {
5436  VMA_HEAVY_ASSERT(index < m_Count);
5437  const size_t oldCount = size();
5438  if(index < oldCount - 1)
5439  {
5440  // Note: This could be more optimal in the case where the memmove could instead be a memcpy directly from m_DynamicArray to m_StaticArray.
5441  T* const dataPtr = data();
5442  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5443  }
5444  resize(oldCount - 1);
5445  }
5446 
5447  void push_back(const T& src)
5448  {
5449  const size_t newIndex = size();
5450  resize(newIndex + 1);
5451  data()[newIndex] = src;
5452  }
5453 
5454  void pop_back()
5455  {
5456  VMA_HEAVY_ASSERT(m_Count > 0);
5457  resize(size() - 1);
5458  }
5459 
5460  void push_front(const T& src)
5461  {
5462  insert(0, src);
5463  }
5464 
5465  void pop_front()
5466  {
5467  VMA_HEAVY_ASSERT(m_Count > 0);
5468  remove(0);
5469  }
5470 
5471  typedef T* iterator;
5472 
5473  iterator begin() { return data(); }
5474  iterator end() { return data() + m_Count; }
5475 
5476 private:
5477  size_t m_Count;
5478  T m_StaticArray[N]; // Used when m_Count <= N
5479  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
5480 };
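/*
Example (an illustrative sketch): with N = 4, the first 4 elements live in
m_StaticArray and need no heap allocation; the 5th push_back migrates the
contents into m_DynamicArray.

    typedef VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 4> SmallU32Vector;

    void Demo(const VkAllocationCallbacks* myCallbacks)
    {
        SmallU32Vector v{ VmaStlAllocator<uint32_t>(myCallbacks) };
        for(uint32_t i = 0; i < 4; ++i)
            v.push_back(i);  // In-place storage, no heap allocation.
        v.push_back(4);      // Count > N: elements are copied to the heap.
        v.resize(2, true);   // Back below N; freeMemory releases the heap block.
    }
*/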
5481 
5482 ////////////////////////////////////////////////////////////////////////////////
5483 // class VmaPoolAllocator
5484 
5485 /*
5486 Allocator for objects of type T, using a list of arrays (pools) to speed up
5487 allocation. The number of elements that can be allocated is not bounded,
5488 because the allocator can create multiple blocks.
5489 */
5490 template<typename T>
5491 class VmaPoolAllocator
5492 {
5493  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5494 public:
5495  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5496  ~VmaPoolAllocator();
5497  template<typename... Types> T* Alloc(Types... args);
5498  void Free(T* ptr);
5499 
5500 private:
5501  union Item
5502  {
5503  uint32_t NextFreeIndex;
5504  alignas(T) char Value[sizeof(T)];
5505  };
5506 
5507  struct ItemBlock
5508  {
5509  Item* pItems;
5510  uint32_t Capacity;
5511  uint32_t FirstFreeIndex;
5512  };
5513 
5514  const VkAllocationCallbacks* m_pAllocationCallbacks;
5515  const uint32_t m_FirstBlockCapacity;
5516  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5517 
5518  ItemBlock& CreateNewBlock();
5519 };
5520 
5521 template<typename T>
5522 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5523  m_pAllocationCallbacks(pAllocationCallbacks),
5524  m_FirstBlockCapacity(firstBlockCapacity),
5525  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5526 {
5527  VMA_ASSERT(m_FirstBlockCapacity > 1);
5528 }
5529 
5530 template<typename T>
5531 VmaPoolAllocator<T>::~VmaPoolAllocator()
5532 {
5533  for(size_t i = m_ItemBlocks.size(); i--; )
5534  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5535  m_ItemBlocks.clear();
5536 }
5537 
5538 template<typename T>
5539 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
5540 {
5541  for(size_t i = m_ItemBlocks.size(); i--; )
5542  {
5543  ItemBlock& block = m_ItemBlocks[i];
5544  // This block has some free items: use the first one.
5545  if(block.FirstFreeIndex != UINT32_MAX)
5546  {
5547  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5548  block.FirstFreeIndex = pItem->NextFreeIndex;
5549  T* result = (T*)&pItem->Value;
5550  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5551  return result;
5552  }
5553  }
5554 
5555  // No block has a free item: create a new one and use it.
5556  ItemBlock& newBlock = CreateNewBlock();
5557  Item* const pItem = &newBlock.pItems[0];
5558  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5559  T* result = (T*)&pItem->Value;
5560  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5561  return result;
5562 }
5563 
5564 template<typename T>
5565 void VmaPoolAllocator<T>::Free(T* ptr)
5566 {
5567  // Search all memory blocks to find ptr.
5568  for(size_t i = m_ItemBlocks.size(); i--; )
5569  {
5570  ItemBlock& block = m_ItemBlocks[i];
5571 
5572  // Casting to union.
5573  Item* pItemPtr;
5574  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5575 
5576  // Check if pItemPtr is in address range of this block.
5577  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5578  {
5579  ptr->~T(); // Explicit destructor call.
5580  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5581  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5582  block.FirstFreeIndex = index;
5583  return;
5584  }
5585  }
5586  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5587 }
5588 
5589 template<typename T>
5590 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5591 {
5592  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5593  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5594 
5595  const ItemBlock newBlock = {
5596  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5597  newBlockCapacity,
5598  0 };
5599 
5600  m_ItemBlocks.push_back(newBlock);
5601 
5602  // Setup singly-linked list of all free items in this block.
5603  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5604  newBlock.pItems[i].NextFreeIndex = i + 1;
5605  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5606  return m_ItemBlocks.back();
5607 }
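/*
Example (an illustrative sketch): the pool hands out fixed-size items from a
list of growing blocks. Capacities grow geometrically - with firstBlockCapacity
= 32, successive blocks hold 32, 48, 72, ... items (each 3/2 of the previous),
so the number of blocks stays small even for many live items.

    struct Node { int value; Node(int v) : value(v) {} };

    void Demo(const VkAllocationCallbacks* myCallbacks)
    {
        VmaPoolAllocator<Node> pool(myCallbacks, 32);
        Node* n = pool.Alloc(42); // Placement-constructs Node(42) in a free slot.
        pool.Free(n);             // Destroys it and pushes the slot onto the block's free list.
    }
*/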
5608 
5609 ////////////////////////////////////////////////////////////////////////////////
5610 // class VmaRawList, VmaList
5611 
5612 #if VMA_USE_STL_LIST
5613 
5614 #define VmaList std::list
5615 
5616 #else // #if VMA_USE_STL_LIST
5617 
5618 template<typename T>
5619 struct VmaListItem
5620 {
5621  VmaListItem* pPrev;
5622  VmaListItem* pNext;
5623  T Value;
5624 };
5625 
5626 // Doubly linked list.
5627 template<typename T>
5628 class VmaRawList
5629 {
5630  VMA_CLASS_NO_COPY(VmaRawList)
5631 public:
5632  typedef VmaListItem<T> ItemType;
5633 
5634  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5635  ~VmaRawList();
5636  void Clear();
5637 
5638  size_t GetCount() const { return m_Count; }
5639  bool IsEmpty() const { return m_Count == 0; }
5640 
5641  ItemType* Front() { return m_pFront; }
5642  const ItemType* Front() const { return m_pFront; }
5643  ItemType* Back() { return m_pBack; }
5644  const ItemType* Back() const { return m_pBack; }
5645 
5646  ItemType* PushBack();
5647  ItemType* PushFront();
5648  ItemType* PushBack(const T& value);
5649  ItemType* PushFront(const T& value);
5650  void PopBack();
5651  void PopFront();
5652 
5653  // Item can be null - it means PushBack.
5654  ItemType* InsertBefore(ItemType* pItem);
5655  // Item can be null - it means PushFront.
5656  ItemType* InsertAfter(ItemType* pItem);
5657 
5658  ItemType* InsertBefore(ItemType* pItem, const T& value);
5659  ItemType* InsertAfter(ItemType* pItem, const T& value);
5660 
5661  void Remove(ItemType* pItem);
5662 
5663 private:
5664  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5665  VmaPoolAllocator<ItemType> m_ItemAllocator;
5666  ItemType* m_pFront;
5667  ItemType* m_pBack;
5668  size_t m_Count;
5669 };
5670 
5671 template<typename T>
5672 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5673  m_pAllocationCallbacks(pAllocationCallbacks),
5674  m_ItemAllocator(pAllocationCallbacks, 128),
5675  m_pFront(VMA_NULL),
5676  m_pBack(VMA_NULL),
5677  m_Count(0)
5678 {
5679 }
5680 
5681 template<typename T>
5682 VmaRawList<T>::~VmaRawList()
5683 {
5684  // Intentionally not calling Clear, because that would spend unnecessary
5685  // computation on returning all items to m_ItemAllocator as free.
5686 }
5687 
5688 template<typename T>
5689 void VmaRawList<T>::Clear()
5690 {
5691  if(IsEmpty() == false)
5692  {
5693  ItemType* pItem = m_pBack;
5694  while(pItem != VMA_NULL)
5695  {
5696  ItemType* const pPrevItem = pItem->pPrev;
5697  m_ItemAllocator.Free(pItem);
5698  pItem = pPrevItem;
5699  }
5700  m_pFront = VMA_NULL;
5701  m_pBack = VMA_NULL;
5702  m_Count = 0;
5703  }
5704 }
5705 
5706 template<typename T>
5707 VmaListItem<T>* VmaRawList<T>::PushBack()
5708 {
5709  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5710  pNewItem->pNext = VMA_NULL;
5711  if(IsEmpty())
5712  {
5713  pNewItem->pPrev = VMA_NULL;
5714  m_pFront = pNewItem;
5715  m_pBack = pNewItem;
5716  m_Count = 1;
5717  }
5718  else
5719  {
5720  pNewItem->pPrev = m_pBack;
5721  m_pBack->pNext = pNewItem;
5722  m_pBack = pNewItem;
5723  ++m_Count;
5724  }
5725  return pNewItem;
5726 }
5727 
5728 template<typename T>
5729 VmaListItem<T>* VmaRawList<T>::PushFront()
5730 {
5731  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5732  pNewItem->pPrev = VMA_NULL;
5733  if(IsEmpty())
5734  {
5735  pNewItem->pNext = VMA_NULL;
5736  m_pFront = pNewItem;
5737  m_pBack = pNewItem;
5738  m_Count = 1;
5739  }
5740  else
5741  {
5742  pNewItem->pNext = m_pFront;
5743  m_pFront->pPrev = pNewItem;
5744  m_pFront = pNewItem;
5745  ++m_Count;
5746  }
5747  return pNewItem;
5748 }
5749 
5750 template<typename T>
5751 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5752 {
5753  ItemType* const pNewItem = PushBack();
5754  pNewItem->Value = value;
5755  return pNewItem;
5756 }
5757 
5758 template<typename T>
5759 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5760 {
5761  ItemType* const pNewItem = PushFront();
5762  pNewItem->Value = value;
5763  return pNewItem;
5764 }
5765 
5766 template<typename T>
5767 void VmaRawList<T>::PopBack()
5768 {
5769  VMA_HEAVY_ASSERT(m_Count > 0);
5770  ItemType* const pBackItem = m_pBack;
5771  ItemType* const pPrevItem = pBackItem->pPrev;
5772  if(pPrevItem != VMA_NULL)
5773  {
5774  pPrevItem->pNext = VMA_NULL;
5775  }
5776  m_pBack = pPrevItem;
5777  m_ItemAllocator.Free(pBackItem);
5778  --m_Count;
5779 }
5780 
5781 template<typename T>
5782 void VmaRawList<T>::PopFront()
5783 {
5784  VMA_HEAVY_ASSERT(m_Count > 0);
5785  ItemType* const pFrontItem = m_pFront;
5786  ItemType* const pNextItem = pFrontItem->pNext;
5787  if(pNextItem != VMA_NULL)
5788  {
5789  pNextItem->pPrev = VMA_NULL;
5790  }
5791  m_pFront = pNextItem;
5792  m_ItemAllocator.Free(pFrontItem);
5793  --m_Count;
5794 }
5795 
5796 template<typename T>
5797 void VmaRawList<T>::Remove(ItemType* pItem)
5798 {
5799  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5800  VMA_HEAVY_ASSERT(m_Count > 0);
5801 
5802  if(pItem->pPrev != VMA_NULL)
5803  {
5804  pItem->pPrev->pNext = pItem->pNext;
5805  }
5806  else
5807  {
5808  VMA_HEAVY_ASSERT(m_pFront == pItem);
5809  m_pFront = pItem->pNext;
5810  }
5811 
5812  if(pItem->pNext != VMA_NULL)
5813  {
5814  pItem->pNext->pPrev = pItem->pPrev;
5815  }
5816  else
5817  {
5818  VMA_HEAVY_ASSERT(m_pBack == pItem);
5819  m_pBack = pItem->pPrev;
5820  }
5821 
5822  m_ItemAllocator.Free(pItem);
5823  --m_Count;
5824 }
5825 
5826 template<typename T>
5827 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5828 {
5829  if(pItem != VMA_NULL)
5830  {
5831  ItemType* const prevItem = pItem->pPrev;
5832  ItemType* const newItem = m_ItemAllocator.Alloc();
5833  newItem->pPrev = prevItem;
5834  newItem->pNext = pItem;
5835  pItem->pPrev = newItem;
5836  if(prevItem != VMA_NULL)
5837  {
5838  prevItem->pNext = newItem;
5839  }
5840  else
5841  {
5842  VMA_HEAVY_ASSERT(m_pFront == pItem);
5843  m_pFront = newItem;
5844  }
5845  ++m_Count;
5846  return newItem;
5847  }
5848  else
5849  return PushBack();
5850 }
5851 
5852 template<typename T>
5853 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5854 {
5855  if(pItem != VMA_NULL)
5856  {
5857  ItemType* const nextItem = pItem->pNext;
5858  ItemType* const newItem = m_ItemAllocator.Alloc();
5859  newItem->pNext = nextItem;
5860  newItem->pPrev = pItem;
5861  pItem->pNext = newItem;
5862  if(nextItem != VMA_NULL)
5863  {
5864  nextItem->pPrev = newItem;
5865  }
5866  else
5867  {
5868  VMA_HEAVY_ASSERT(m_pBack == pItem);
5869  m_pBack = newItem;
5870  }
5871  ++m_Count;
5872  return newItem;
5873  }
5874  else
5875  return PushFront();
5876 }
5877 
5878 template<typename T>
5879 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5880 {
5881  ItemType* const newItem = InsertBefore(pItem);
5882  newItem->Value = value;
5883  return newItem;
5884 }
5885 
5886 template<typename T>
5887 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5888 {
5889  ItemType* const newItem = InsertAfter(pItem);
5890  newItem->Value = value;
5891  return newItem;
5892 }
5893 
5894 template<typename T, typename AllocatorT>
5895 class VmaList
5896 {
5897  VMA_CLASS_NO_COPY(VmaList)
5898 public:
5899  class iterator
5900  {
5901  public:
5902  iterator() :
5903  m_pList(VMA_NULL),
5904  m_pItem(VMA_NULL)
5905  {
5906  }
5907 
5908  T& operator*() const
5909  {
5910  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5911  return m_pItem->Value;
5912  }
5913  T* operator->() const
5914  {
5915  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5916  return &m_pItem->Value;
5917  }
5918 
5919  iterator& operator++()
5920  {
5921  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5922  m_pItem = m_pItem->pNext;
5923  return *this;
5924  }
5925  iterator& operator--()
5926  {
5927  if(m_pItem != VMA_NULL)
5928  {
5929  m_pItem = m_pItem->pPrev;
5930  }
5931  else
5932  {
5933  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5934  m_pItem = m_pList->Back();
5935  }
5936  return *this;
5937  }
5938 
5939  iterator operator++(int)
5940  {
5941  iterator result = *this;
5942  ++*this;
5943  return result;
5944  }
5945  iterator operator--(int)
5946  {
5947  iterator result = *this;
5948  --*this;
5949  return result;
5950  }
5951 
5952  bool operator==(const iterator& rhs) const
5953  {
5954  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5955  return m_pItem == rhs.m_pItem;
5956  }
5957  bool operator!=(const iterator& rhs) const
5958  {
5959  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5960  return m_pItem != rhs.m_pItem;
5961  }
5962 
5963  private:
5964  VmaRawList<T>* m_pList;
5965  VmaListItem<T>* m_pItem;
5966 
5967  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5968  m_pList(pList),
5969  m_pItem(pItem)
5970  {
5971  }
5972 
5973  friend class VmaList<T, AllocatorT>;
5974  };
5975 
5976  class const_iterator
5977  {
5978  public:
5979  const_iterator() :
5980  m_pList(VMA_NULL),
5981  m_pItem(VMA_NULL)
5982  {
5983  }
5984 
5985  const_iterator(const iterator& src) :
5986  m_pList(src.m_pList),
5987  m_pItem(src.m_pItem)
5988  {
5989  }
5990 
5991  const T& operator*() const
5992  {
5993  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5994  return m_pItem->Value;
5995  }
5996  const T* operator->() const
5997  {
5998  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5999  return &m_pItem->Value;
6000  }
6001 
6002  const_iterator& operator++()
6003  {
6004  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
6005  m_pItem = m_pItem->pNext;
6006  return *this;
6007  }
6008  const_iterator& operator--()
6009  {
6010  if(m_pItem != VMA_NULL)
6011  {
6012  m_pItem = m_pItem->pPrev;
6013  }
6014  else
6015  {
6016  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
6017  m_pItem = m_pList->Back();
6018  }
6019  return *this;
6020  }
6021 
6022  const_iterator operator++(int)
6023  {
6024  const_iterator result = *this;
6025  ++*this;
6026  return result;
6027  }
6028  const_iterator operator--(int)
6029  {
6030  const_iterator result = *this;
6031  --*this;
6032  return result;
6033  }
6034 
6035  bool operator==(const const_iterator& rhs) const
6036  {
6037  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
6038  return m_pItem == rhs.m_pItem;
6039  }
6040  bool operator!=(const const_iterator& rhs) const
6041  {
6042  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
6043  return m_pItem != rhs.m_pItem;
6044  }
6045 
6046  private:
6047  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
6048  m_pList(pList),
6049  m_pItem(pItem)
6050  {
6051  }
6052 
6053  const VmaRawList<T>* m_pList;
6054  const VmaListItem<T>* m_pItem;
6055 
6056  friend class VmaList<T, AllocatorT>;
6057  };
6058 
6059  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
6060 
6061  bool empty() const { return m_RawList.IsEmpty(); }
6062  size_t size() const { return m_RawList.GetCount(); }
6063 
6064  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
6065  iterator end() { return iterator(&m_RawList, VMA_NULL); }
6066 
6067  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
6068  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
6069 
6070  const_iterator begin() const { return cbegin(); }
6071  const_iterator end() const { return cend(); }
6072 
6073  void clear() { m_RawList.Clear(); }
6074  void push_back(const T& value) { m_RawList.PushBack(value); }
6075  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
6076  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
6077 
6078 private:
6079  VmaRawList<T> m_RawList;
6080 };
6081 
6082 #endif // #if VMA_USE_STL_LIST
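/*
Example (an illustrative sketch): VmaList mirrors the small subset of std::list
that the library needs. Items come from an internal VmaPoolAllocator, so
push_back does not hit the system allocator for every element.

    typedef VmaList<int, VmaStlAllocator<int> > IntList;

    void Demo(const VkAllocationCallbacks* myCallbacks)
    {
        IntList list{ VmaStlAllocator<int>(myCallbacks) };
        list.push_back(1);
        list.push_back(2);
        for(IntList::iterator it = list.begin(); it != list.end(); ++it)
            *it += 10;
        list.erase(list.begin());
    }
*/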
6083 
6084 ////////////////////////////////////////////////////////////////////////////////
6085 // class VmaIntrusiveLinkedList
6086 
6087 /*
6088 Expected interface of ItemTypeTraits:
6089 struct MyItemTypeTraits
6090 {
6091  typedef MyItem ItemType;
6092  static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
6093  static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
6094  static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
6095  static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
6096 };
6097 */
6098 template<typename ItemTypeTraits>
6099 class VmaIntrusiveLinkedList
6100 {
6101 public:
6102  typedef typename ItemTypeTraits::ItemType ItemType;
6103  static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
6104  static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
6105  // Movable, not copyable.
6106  VmaIntrusiveLinkedList() { }
6107  VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
6108  VmaIntrusiveLinkedList(VmaIntrusiveLinkedList<ItemTypeTraits>&& src) :
6109  m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
6110  {
6111  src.m_Front = src.m_Back = VMA_NULL;
6112  src.m_Count = 0;
6113  }
6114  ~VmaIntrusiveLinkedList()
6115  {
6116  VMA_HEAVY_ASSERT(IsEmpty());
6117  }
6118  VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
6119  VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(VmaIntrusiveLinkedList<ItemTypeTraits>&& src)
6120  {
6121  if(&src != this)
6122  {
6123  VMA_HEAVY_ASSERT(IsEmpty());
6124  m_Front = src.m_Front;
6125  m_Back = src.m_Back;
6126  m_Count = src.m_Count;
6127  src.m_Front = src.m_Back = VMA_NULL;
6128  src.m_Count = 0;
6129  }
6130  return *this;
6131  }
6132  void RemoveAll()
6133  {
6134  if(!IsEmpty())
6135  {
6136  ItemType* item = m_Back;
6137  while(item != VMA_NULL)
6138  {
6139  ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
6140  ItemTypeTraits::AccessPrev(item) = VMA_NULL;
6141  ItemTypeTraits::AccessNext(item) = VMA_NULL;
6142  item = prevItem;
6143  }
6144  m_Front = VMA_NULL;
6145  m_Back = VMA_NULL;
6146  m_Count = 0;
6147  }
6148  }
6149  size_t GetCount() const { return m_Count; }
6150  bool IsEmpty() const { return m_Count == 0; }
6151  ItemType* Front() { return m_Front; }
6152  const ItemType* Front() const { return m_Front; }
6153  ItemType* Back() { return m_Back; }
6154  const ItemType* Back() const { return m_Back; }
6155  void PushBack(ItemType* item)
6156  {
6157  VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
6158  if(IsEmpty())
6159  {
6160  m_Front = item;
6161  m_Back = item;
6162  m_Count = 1;
6163  }
6164  else
6165  {
6166  ItemTypeTraits::AccessPrev(item) = m_Back;
6167  ItemTypeTraits::AccessNext(m_Back) = item;
6168  m_Back = item;
6169  ++m_Count;
6170  }
6171  }
6172  void PushFront(ItemType* item)
6173  {
6174  VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
6175  if(IsEmpty())
6176  {
6177  m_Front = item;
6178  m_Back = item;
6179  m_Count = 1;
6180  }
6181  else
6182  {
6183  ItemTypeTraits::AccessNext(item) = m_Front;
6184  ItemTypeTraits::AccessPrev(m_Front) = item;
6185  m_Front = item;
6186  ++m_Count;
6187  }
6188  }
6189  ItemType* PopBack()
6190  {
6191  VMA_HEAVY_ASSERT(m_Count > 0);
6192  ItemType* const backItem = m_Back;
6193  ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
6194  if(prevItem != VMA_NULL)
6195  {
6196  ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
6197  }
6198  m_Back = prevItem;
6199  --m_Count;
6200  ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
6201  ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
6202  return backItem;
6203  }
6204  ItemType* PopFront()
6205  {
6206  VMA_HEAVY_ASSERT(m_Count > 0);
6207  ItemType* const frontItem = m_Front;
6208  ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
6209  if(nextItem != VMA_NULL)
6210  {
6211  ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
6212  }
6213  m_Front = nextItem;
6214  --m_Count;
6215  ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
6216  ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
6217  return frontItem;
6218  }
6219 
6220  // existingItem can be null - it means PushBack.
6221  void InsertBefore(ItemType* existingItem, ItemType* newItem)
6222  {
6223  VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
6224  if(existingItem != VMA_NULL)
6225  {
6226  ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
6227  ItemTypeTraits::AccessPrev(newItem) = prevItem;
6228  ItemTypeTraits::AccessNext(newItem) = existingItem;
6229  ItemTypeTraits::AccessPrev(existingItem) = newItem;
6230  if(prevItem != VMA_NULL)
6231  {
6232  ItemTypeTraits::AccessNext(prevItem) = newItem;
6233  }
6234  else
6235  {
6236  VMA_HEAVY_ASSERT(m_Front == existingItem);
6237  m_Front = newItem;
6238  }
6239  ++m_Count;
6240  }
6241  else
6242  PushBack(newItem);
6243  }
6244  // existingItem can be null - it means PushFront.
6245  void InsertAfter(ItemType* existingItem, ItemType* newItem)
6246  {
6247  VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
6248  if(existingItem != VMA_NULL)
6249  {
6250  ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
6251  ItemTypeTraits::AccessNext(newItem) = nextItem;
6252  ItemTypeTraits::AccessPrev(newItem) = existingItem;
6253  ItemTypeTraits::AccessNext(existingItem) = newItem;
6254  if(nextItem != VMA_NULL)
6255  {
6256  ItemTypeTraits::AccessPrev(nextItem) = newItem;
6257  }
6258  else
6259  {
6260  VMA_HEAVY_ASSERT(m_Back == existingItem);
6261  m_Back = newItem;
6262  }
6263  ++m_Count;
6264  }
6265  else
6266  PushFront(newItem);
6267  }
6268  void Remove(ItemType* item)
6269  {
6270  VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
6271  if(ItemTypeTraits::GetPrev(item) != VMA_NULL)
6272  {
6273  ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
6274  }
6275  else
6276  {
6277  VMA_HEAVY_ASSERT(m_Front == item);
6278  m_Front = ItemTypeTraits::GetNext(item);
6279  }
6280 
6281  if(ItemTypeTraits::GetNext(item) != VMA_NULL)
6282  {
6283  ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
6284  }
6285  else
6286  {
6287  VMA_HEAVY_ASSERT(m_Back == item);
6288  m_Back = ItemTypeTraits::GetPrev(item);
6289  }
6290  ItemTypeTraits::AccessPrev(item) = VMA_NULL;
6291  ItemTypeTraits::AccessNext(item) = VMA_NULL;
6292  --m_Count;
6293  }
6294 private:
6295  ItemType* m_Front = VMA_NULL;
6296  ItemType* m_Back = VMA_NULL;
6297  size_t m_Count = 0;
6298 };
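/*
Example (an illustrative sketch): an item type that carries its own links,
matching the MyItemTypeTraits shown above. Unlike VmaList, this list never
allocates - it only rewires pointers embedded in items that the caller owns.

    struct MyItem
    {
        MyItem* myPrevPtr = VMA_NULL;
        MyItem* myNextPtr = VMA_NULL;
        int payload = 0;
    };

    void Demo()
    {
        MyItem a, b;
        VmaIntrusiveLinkedList<MyItemTypeTraits> list;
        list.PushBack(&a);
        list.InsertAfter(&a, &b); // a <-> b
        list.RemoveAll();         // The list must be empty before its destructor runs.
    }
*/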
6299 
6300 ////////////////////////////////////////////////////////////////////////////////
6301 // class VmaMap
6302 
6303 // Unused in this version.
6304 #if 0
6305 
6306 #if VMA_USE_STL_UNORDERED_MAP
6307 
6308 #define VmaPair std::pair
6309 
6310 #define VMA_MAP_TYPE(KeyT, ValueT) \
6311  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
6312 
6313 #else // #if VMA_USE_STL_UNORDERED_MAP
6314 
6315 template<typename T1, typename T2>
6316 struct VmaPair
6317 {
6318  T1 first;
6319  T2 second;
6320 
6321  VmaPair() : first(), second() { }
6322  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
6323 };
6324 
6325 /* A class compatible with a subset of the interface of std::unordered_map.
6326 KeyT and ValueT must be POD because they are stored in a VmaVector.
6327 */
6328 template<typename KeyT, typename ValueT>
6329 class VmaMap
6330 {
6331 public:
6332  typedef VmaPair<KeyT, ValueT> PairType;
6333  typedef PairType* iterator;
6334 
6335  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
6336 
6337  iterator begin() { return m_Vector.begin(); }
6338  iterator end() { return m_Vector.end(); }
6339 
6340  void insert(const PairType& pair);
6341  iterator find(const KeyT& key);
6342  void erase(iterator it);
6343 
6344 private:
6345  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
6346 };
6347 
6348 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
6349 
6350 template<typename FirstT, typename SecondT>
6351 struct VmaPairFirstLess
6352 {
6353  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
6354  {
6355  return lhs.first < rhs.first;
6356  }
6357  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
6358  {
6359  return lhs.first < rhsFirst;
6360  }
6361 };
6362 
6363 template<typename KeyT, typename ValueT>
6364 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
6365 {
6366  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
6367  m_Vector.data(),
6368  m_Vector.data() + m_Vector.size(),
6369  pair,
6370  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
6371  VmaVectorInsert(m_Vector, indexToInsert, pair);
6372 }
6373 
6374 template<typename KeyT, typename ValueT>
6375 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
6376 {
6377  PairType* it = VmaBinaryFindFirstNotLess(
6378  m_Vector.data(),
6379  m_Vector.data() + m_Vector.size(),
6380  key,
6381  VmaPairFirstLess<KeyT, ValueT>());
6382  if((it != m_Vector.end()) && (it->first == key))
6383  {
6384  return it;
6385  }
6386  else
6387  {
6388  return m_Vector.end();
6389  }
6390 }
6391 
6392 template<typename KeyT, typename ValueT>
6393 void VmaMap<KeyT, ValueT>::erase(iterator it)
6394 {
6395  VmaVectorRemove(m_Vector, it - m_Vector.begin());
6396 }
6397 
6398 #endif // #if VMA_USE_STL_UNORDERED_MAP
6399 
6400 #endif // #if 0
6401 
6402 ////////////////////////////////////////////////////////////////////////////////
6403 
6404 class VmaDeviceMemoryBlock;
6405 
6406 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
6407 
6408 struct VmaAllocation_T
6409 {
6410 private:
6411  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
6412 
6413  enum FLAGS
6414  {
6415  FLAG_USER_DATA_STRING = 0x01,
6416  };
6417 
6418 public:
6419  enum ALLOCATION_TYPE
6420  {
6421  ALLOCATION_TYPE_NONE,
6422  ALLOCATION_TYPE_BLOCK,
6423  ALLOCATION_TYPE_DEDICATED,
6424  };
6425 
6426  /*
6427  This struct is allocated using VmaPoolAllocator.
6428  */
6429 
6430  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
6431  m_Alignment{1},
6432  m_Size{0},
6433  m_pUserData{VMA_NULL},
6434  m_LastUseFrameIndex{currentFrameIndex},
6435  m_MemoryTypeIndex{0},
6436  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
6437  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
6438  m_MapCount{0},
6439  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
6440  {
6441 #if VMA_STATS_STRING_ENABLED
6442  m_CreationFrameIndex = currentFrameIndex;
6443  m_BufferImageUsage = 0;
6444 #endif
6445  }
6446 
6447  ~VmaAllocation_T()
6448  {
6449  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
6450 
6451  // Check if owned string was freed.
6452  VMA_ASSERT(m_pUserData == VMA_NULL);
6453  }
6454 
6455  void InitBlockAllocation(
6456  VmaDeviceMemoryBlock* block,
6457  VkDeviceSize offset,
6458  VkDeviceSize alignment,
6459  VkDeviceSize size,
6460  uint32_t memoryTypeIndex,
6461  VmaSuballocationType suballocationType,
6462  bool mapped,
6463  bool canBecomeLost)
6464  {
6465  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6466  VMA_ASSERT(block != VMA_NULL);
6467  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6468  m_Alignment = alignment;
6469  m_Size = size;
6470  m_MemoryTypeIndex = memoryTypeIndex;
6471  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6472  m_SuballocationType = (uint8_t)suballocationType;
6473  m_BlockAllocation.m_Block = block;
6474  m_BlockAllocation.m_Offset = offset;
6475  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
6476  }
6477 
6478  void InitLost()
6479  {
6480  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6481  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
6482  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6483  m_MemoryTypeIndex = 0;
6484  m_BlockAllocation.m_Block = VMA_NULL;
6485  m_BlockAllocation.m_Offset = 0;
6486  m_BlockAllocation.m_CanBecomeLost = true;
6487  }
6488 
6489  void ChangeBlockAllocation(
6490  VmaAllocator hAllocator,
6491  VmaDeviceMemoryBlock* block,
6492  VkDeviceSize offset);
6493 
6494  void ChangeOffset(VkDeviceSize newOffset);
6495 
6496  // pMappedData not null means allocation is created with MAPPED flag.
6497  void InitDedicatedAllocation(
6498  uint32_t memoryTypeIndex,
6499  VkDeviceMemory hMemory,
6500  VmaSuballocationType suballocationType,
6501  void* pMappedData,
6502  VkDeviceSize size)
6503  {
6504  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6505  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
6506  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
6507  m_Alignment = 0;
6508  m_Size = size;
6509  m_MemoryTypeIndex = memoryTypeIndex;
6510  m_SuballocationType = (uint8_t)suballocationType;
6511  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6512  m_DedicatedAllocation.m_hMemory = hMemory;
6513  m_DedicatedAllocation.m_pMappedData = pMappedData;
6514  m_DedicatedAllocation.m_Prev = VMA_NULL;
6515  m_DedicatedAllocation.m_Next = VMA_NULL;
6516  }
6517 
6518  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
6519  VkDeviceSize GetAlignment() const { return m_Alignment; }
6520  VkDeviceSize GetSize() const { return m_Size; }
6521  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
6522  void* GetUserData() const { return m_pUserData; }
6523  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6524  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6525 
6526  VmaDeviceMemoryBlock* GetBlock() const
6527  {
6528  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6529  return m_BlockAllocation.m_Block;
6530  }
6531  VkDeviceSize GetOffset() const;
6532  VkDeviceMemory GetMemory() const;
6533  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6534  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6535  void* GetMappedData() const;
6536  bool CanBecomeLost() const;
6537 
6538  uint32_t GetLastUseFrameIndex() const
6539  {
6540  return m_LastUseFrameIndex.load();
6541  }
6542  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6543  {
6544  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6545  }
6546  /*
6547  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6548  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6549  - Else, returns false.
6550 
6551  If hAllocation is already lost, assert - you should not call it then.
6552  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
6553  */
6554  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6555 
6556  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6557  {
6558  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6559  outInfo.blockCount = 1;
6560  outInfo.allocationCount = 1;
6561  outInfo.unusedRangeCount = 0;
6562  outInfo.usedBytes = m_Size;
6563  outInfo.unusedBytes = 0;
6564  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6565  outInfo.unusedRangeSizeMin = UINT64_MAX;
6566  outInfo.unusedRangeSizeMax = 0;
6567  }
6568 
6569  void BlockAllocMap();
6570  void BlockAllocUnmap();
6571  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6572  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6573 
6574 #if VMA_STATS_STRING_ENABLED
6575  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6576  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6577 
6578  void InitBufferImageUsage(uint32_t bufferImageUsage)
6579  {
6580  VMA_ASSERT(m_BufferImageUsage == 0);
6581  m_BufferImageUsage = bufferImageUsage;
6582  }
6583 
6584  void PrintParameters(class VmaJsonWriter& json) const;
6585 #endif
6586 
6587 private:
6588  VkDeviceSize m_Alignment;
6589  VkDeviceSize m_Size;
6590  void* m_pUserData;
6591  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6592  uint32_t m_MemoryTypeIndex;
6593  uint8_t m_Type; // ALLOCATION_TYPE
6594  uint8_t m_SuballocationType; // VmaSuballocationType
6595  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6596  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
6597  uint8_t m_MapCount;
6598  uint8_t m_Flags; // enum FLAGS
6599 
6600  // Allocation out of VmaDeviceMemoryBlock.
6601  struct BlockAllocation
6602  {
6603  VmaDeviceMemoryBlock* m_Block;
6604  VkDeviceSize m_Offset;
6605  bool m_CanBecomeLost;
6606  };
6607 
6608  // Allocation for an object that has its own private VkDeviceMemory.
6609  struct DedicatedAllocation
6610  {
6611  VkDeviceMemory m_hMemory;
6612  void* m_pMappedData; // Not null means memory is mapped.
6613  VmaAllocation_T* m_Prev;
6614  VmaAllocation_T* m_Next;
6615  };
6616 
6617  union
6618  {
6619  // Allocation out of VmaDeviceMemoryBlock.
6620  BlockAllocation m_BlockAllocation;
6621  // Allocation for an object that has its own private VkDeviceMemory.
6622  DedicatedAllocation m_DedicatedAllocation;
6623  };
6624 
6625 #if VMA_STATS_STRING_ENABLED
6626  uint32_t m_CreationFrameIndex;
6627  uint32_t m_BufferImageUsage; // 0 if unknown.
6628 #endif
6629 
6630  void FreeUserDataString(VmaAllocator hAllocator);
6631 
6632  friend struct VmaDedicatedAllocationListItemTraits;
6633 };
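/*
Illustration (not part of the library) of the m_MapCount bit layout described
above. For an allocation created with VMA_ALLOCATION_CREATE_MAPPED_BIT that was
then mapped twice more via vmaMapMemory():

    m_MapCount == 0x82
    (m_MapCount & 0x80) != 0 // true - IsPersistentMap()
    (m_MapCount & 0x7F) == 2 // two outstanding vmaMapMemory() calls

The destructor's assert (m_MapCount & ~0x80) == 0 therefore means: every
vmaMapMemory() was balanced by vmaUnmapMemory(); only the persistent-map bit
may remain set.
*/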
6634 
6635 struct VmaDedicatedAllocationListItemTraits
6636 {
6637  typedef VmaAllocation_T ItemType;
6638  static ItemType* GetPrev(const ItemType* item)
6639  {
6640  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6641  return item->m_DedicatedAllocation.m_Prev;
6642  }
6643  static ItemType* GetNext(const ItemType* item)
6644  {
6645  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6646  return item->m_DedicatedAllocation.m_Next;
6647  }
6648  static ItemType*& AccessPrev(ItemType* item)
6649  {
6650  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6651  return item->m_DedicatedAllocation.m_Prev;
6652  }
6653  static ItemType*& AccessNext(ItemType* item) {
6654  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6655  return item->m_DedicatedAllocation.m_Next;
6656  }
6657 };
6658 
6659 /*
6660 Represents a region of a VmaDeviceMemoryBlock that is either assigned and
6661 returned as an allocated memory block, or free.
6662 */
6663 struct VmaSuballocation
6664 {
6665  VkDeviceSize offset;
6666  VkDeviceSize size;
6667  VmaAllocation hAllocation;
6668  VmaSuballocationType type;
6669 };
6670 
6671 // Comparator for offsets.
6672 struct VmaSuballocationOffsetLess
6673 {
6674  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6675  {
6676  return lhs.offset < rhs.offset;
6677  }
6678 };
6679 struct VmaSuballocationOffsetGreater
6680 {
6681  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6682  {
6683  return lhs.offset > rhs.offset;
6684  }
6685 };
6686 
6687 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6688 
6689 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
6690 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6691 
6692 enum class VmaAllocationRequestType
6693 {
6694  Normal,
6695  // Used by "Linear" algorithm.
6696  UpperAddress,
6697  EndOf1st,
6698  EndOf2nd,
6699 };
6700 
6701 /*
6702 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6703 
6704 If canMakeOtherLost was false:
6705 - item points to a FREE suballocation.
6706 - itemsToMakeLostCount is 0.
6707 
6708 If canMakeOtherLost was true:
6709 - item points to first of sequence of suballocations, which are either FREE,
6710  or point to VmaAllocations that can become lost.
6711 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6712  the requested allocation to succeed.
6713 */
6714 struct VmaAllocationRequest
6715 {
6716  VkDeviceSize offset;
6717  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6718  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6719  VmaSuballocationList::iterator item;
6720  size_t itemsToMakeLostCount;
6721  void* customData;
6722  VmaAllocationRequestType type;
6723 
6724  VkDeviceSize CalcCost() const
6725  {
6726  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6727  }
6728 };
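/*
Worked example (illustrative numbers): if satisfying a request would overlap
one live 256 KiB allocation that can become lost (sumItemSize = 262144,
itemsToMakeLostCount = 1), then

    CalcCost() = 262144 + 1 * 1048576 = 1310720

so when comparing candidate placements, one that requires no losses (cost 0)
always beats one that would sacrifice an existing allocation.
*/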
6729 
6730 /*
6731 Data structure used for bookkeeping of allocations and unused ranges of memory
6732 in a single VkDeviceMemory block.
6733 */
6734 class VmaBlockMetadata
6735 {
6736 public:
6737  VmaBlockMetadata(VmaAllocator hAllocator);
6738  virtual ~VmaBlockMetadata() { }
6739  virtual void Init(VkDeviceSize size) { m_Size = size; }
6740 
6741  // Validates all data structures inside this object. If not valid, returns false.
6742  virtual bool Validate() const = 0;
6743  VkDeviceSize GetSize() const { return m_Size; }
6744  virtual size_t GetAllocationCount() const = 0;
6745  virtual VkDeviceSize GetSumFreeSize() const = 0;
6746  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6747  // Returns true if this block is empty - contains only a single free suballocation.
6748  virtual bool IsEmpty() const = 0;
6749 
6750  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6751  // Shouldn't modify blockCount.
6752  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6753 
6754 #if VMA_STATS_STRING_ENABLED
6755  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6756 #endif
6757 
6758  // Tries to find a place for a suballocation with the given parameters inside this block.
6759  // If it succeeds, fills pAllocationRequest and returns true.
6760  // If it fails, returns false.
6761  virtual bool CreateAllocationRequest(
6762  uint32_t currentFrameIndex,
6763  uint32_t frameInUseCount,
6764  VkDeviceSize bufferImageGranularity,
6765  VkDeviceSize allocSize,
6766  VkDeviceSize allocAlignment,
6767  bool upperAddress,
6768  VmaSuballocationType allocType,
6769  bool canMakeOtherLost,
6770  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6771  uint32_t strategy,
6772  VmaAllocationRequest* pAllocationRequest) = 0;
6773 
6774  virtual bool MakeRequestedAllocationsLost(
6775  uint32_t currentFrameIndex,
6776  uint32_t frameInUseCount,
6777  VmaAllocationRequest* pAllocationRequest) = 0;
6778 
6779  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6780 
6781  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6782 
6783  // Makes actual allocation based on request. Request must already be checked and valid.
6784  virtual void Alloc(
6785  const VmaAllocationRequest& request,
6786  VmaSuballocationType type,
6787  VkDeviceSize allocSize,
6788  VmaAllocation hAllocation) = 0;
6789 
6790  // Frees suballocation assigned to given memory region.
6791  virtual void Free(const VmaAllocation allocation) = 0;
6792  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6793 
6794 protected:
6795  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6796 
6797 #if VMA_STATS_STRING_ENABLED
6798  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6799  VkDeviceSize unusedBytes,
6800  size_t allocationCount,
6801  size_t unusedRangeCount) const;
6802  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6803  VkDeviceSize offset,
6804  VmaAllocation hAllocation) const;
6805  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6806  VkDeviceSize offset,
6807  VkDeviceSize size) const;
6808  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6809 #endif
6810 
6811 private:
6812  VkDeviceSize m_Size;
6813  const VkAllocationCallbacks* m_pAllocationCallbacks;
6814 };
6815 
6816 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6817  VMA_ASSERT(0 && "Validation failed: " #cond); \
6818  return false; \
6819  } } while(false)
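/*
Example (an illustrative sketch; MyMetadata is hypothetical, with members named
after VmaBlockMetadata_Generic below): Validate() implementations chain
VMA_VALIDATE checks so that each failed condition asserts with its own message
and makes Validate() return false instead of crashing later.

    bool MyMetadata::Validate() const
    {
        VMA_VALIDATE(m_SumFreeSize <= GetSize());
        VMA_VALIDATE(m_FreeCount <= m_Suballocations.size());
        return true;
    }
*/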
6820 
6821 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6822 {
6823  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6824 public:
6825  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6826  virtual ~VmaBlockMetadata_Generic();
6827  virtual void Init(VkDeviceSize size);
6828 
6829  virtual bool Validate() const;
6830  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6831  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6832  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6833  virtual bool IsEmpty() const;
6834 
6835  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6836  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6837 
6838 #if VMA_STATS_STRING_ENABLED
6839  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6840 #endif
6841 
6842  virtual bool CreateAllocationRequest(
6843  uint32_t currentFrameIndex,
6844  uint32_t frameInUseCount,
6845  VkDeviceSize bufferImageGranularity,
6846  VkDeviceSize allocSize,
6847  VkDeviceSize allocAlignment,
6848  bool upperAddress,
6849  VmaSuballocationType allocType,
6850  bool canMakeOtherLost,
6851  uint32_t strategy,
6852  VmaAllocationRequest* pAllocationRequest);
6853 
6854  virtual bool MakeRequestedAllocationsLost(
6855  uint32_t currentFrameIndex,
6856  uint32_t frameInUseCount,
6857  VmaAllocationRequest* pAllocationRequest);
6858 
6859  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6860 
6861  virtual VkResult CheckCorruption(const void* pBlockData);
6862 
6863  virtual void Alloc(
6864  const VmaAllocationRequest& request,
6865  VmaSuballocationType type,
6866  VkDeviceSize allocSize,
6867  VmaAllocation hAllocation);
6868 
6869  virtual void Free(const VmaAllocation allocation);
6870  virtual void FreeAtOffset(VkDeviceSize offset);
6871 
6872  ////////////////////////////////////////////////////////////////////////////////
6873  // For defragmentation
6874 
6875  bool IsBufferImageGranularityConflictPossible(
6876  VkDeviceSize bufferImageGranularity,
6877  VmaSuballocationType& inOutPrevSuballocType) const;
6878 
6879 private:
6880  friend class VmaDefragmentationAlgorithm_Generic;
6881  friend class VmaDefragmentationAlgorithm_Fast;
6882 
6883  uint32_t m_FreeCount;
6884  VkDeviceSize m_SumFreeSize;
6885  VmaSuballocationList m_Suballocations;
6886  // Suballocations that are free and have size greater than certain threshold.
6887  // Sorted by size, ascending.
6888  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6889 
6890  bool ValidateFreeSuballocationList() const;
6891 
6892  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
6893  // If yes, fills pOffset and returns true. If no, returns false.
6894  bool CheckAllocation(
6895  uint32_t currentFrameIndex,
6896  uint32_t frameInUseCount,
6897  VkDeviceSize bufferImageGranularity,
6898  VkDeviceSize allocSize,
6899  VkDeviceSize allocAlignment,
6900  VmaSuballocationType allocType,
6901  VmaSuballocationList::const_iterator suballocItem,
6902  bool canMakeOtherLost,
6903  VkDeviceSize* pOffset,
6904  size_t* itemsToMakeLostCount,
6905  VkDeviceSize* pSumFreeSize,
6906  VkDeviceSize* pSumItemSize) const;
6907  // Given free suballocation, it merges it with following one, which must also be free.
6908  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6909  // Releases given suballocation, making it free.
6910  // Merges it with adjacent free suballocations if applicable.
6911  // Returns iterator to new free suballocation at this place.
6912  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6913  // Given a free suballocation, inserts it into the sorted list
6914  // m_FreeSuballocationsBySize if it is suitable (large enough to register).
6915  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6916  // Given a free suballocation, removes it from the sorted list
6917  // m_FreeSuballocationsBySize if it is suitable (large enough to be registered).
6918  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6919 };
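// A minimal standalone sketch of the best-fit lookup that a size-sorted free
// list like m_FreeSuballocationsBySize above enables: a binary search returns
// the smallest free range that still fits the request. Shown self-contained
// with its own includes; the types and names below are illustrative only,
// not part of VMA.
#include <algorithm>
#include <cstdint>
#include <vector>

struct FreeRangeSketch { uint64_t offset; uint64_t size; };

// Returns the index of the smallest free range that can hold `allocSize`,
// or SIZE_MAX if none fits. `bySize` must be sorted by size, ascending.
inline size_t FindBestFitSketch(const std::vector<FreeRangeSketch>& bySize, uint64_t allocSize)
{
    // First element whose size is not less than the requested size.
    const auto it = std::lower_bound(bySize.begin(), bySize.end(), allocSize,
        [](const FreeRangeSketch& r, uint64_t s) { return r.size < s; });
    return it != bySize.end() ? static_cast<size_t>(it - bySize.begin()) : SIZE_MAX;
}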
6920 
6921 /*
6922 Allocations and their references in internal data structure look like this:
6923 
6924 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6925 
6926  0 +-------+
6927  | |
6928  | |
6929  | |
6930  +-------+
6931  | Alloc | 1st[m_1stNullItemsBeginCount]
6932  +-------+
6933  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6934  +-------+
6935  | ... |
6936  +-------+
6937  | Alloc | 1st[1st.size() - 1]
6938  +-------+
6939  | |
6940  | |
6941  | |
6942 GetSize() +-------+
6943 
6944 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6945 
6946  0 +-------+
6947  | Alloc | 2nd[0]
6948  +-------+
6949  | Alloc | 2nd[1]
6950  +-------+
6951  | ... |
6952  +-------+
6953  | Alloc | 2nd[2nd.size() - 1]
6954  +-------+
6955  | |
6956  | |
6957  | |
6958  +-------+
6959  | Alloc | 1st[m_1stNullItemsBeginCount]
6960  +-------+
6961  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6962  +-------+
6963  | ... |
6964  +-------+
6965  | Alloc | 1st[1st.size() - 1]
6966  +-------+
6967  | |
6968 GetSize() +-------+
6969 
6970 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6971 
6972  0 +-------+
6973  | |
6974  | |
6975  | |
6976  +-------+
6977  | Alloc | 1st[m_1stNullItemsBeginCount]
6978  +-------+
6979  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6980  +-------+
6981  | ... |
6982  +-------+
6983  | Alloc | 1st[1st.size() - 1]
6984  +-------+
6985  | |
6986  | |
6987  | |
6988  +-------+
6989  | Alloc | 2nd[2nd.size() - 1]
6990  +-------+
6991  | ... |
6992  +-------+
6993  | Alloc | 2nd[1]
6994  +-------+
6995  | Alloc | 2nd[0]
6996 GetSize() +-------+
6997 
6998 */
6999 class VmaBlockMetadata_Linear : public VmaBlockMetadata
7000 {
7001  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
7002 public:
7003  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
7004  virtual ~VmaBlockMetadata_Linear();
7005  virtual void Init(VkDeviceSize size);
7006 
7007  virtual bool Validate() const;
7008  virtual size_t GetAllocationCount() const;
7009  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
7010  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
7011  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
7012 
7013  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
7014  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
7015 
7016 #if VMA_STATS_STRING_ENABLED
7017  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
7018 #endif
7019 
7020  virtual bool CreateAllocationRequest(
7021  uint32_t currentFrameIndex,
7022  uint32_t frameInUseCount,
7023  VkDeviceSize bufferImageGranularity,
7024  VkDeviceSize allocSize,
7025  VkDeviceSize allocAlignment,
7026  bool upperAddress,
7027  VmaSuballocationType allocType,
7028  bool canMakeOtherLost,
7029  uint32_t strategy,
7030  VmaAllocationRequest* pAllocationRequest);
7031 
7032  virtual bool MakeRequestedAllocationsLost(
7033  uint32_t currentFrameIndex,
7034  uint32_t frameInUseCount,
7035  VmaAllocationRequest* pAllocationRequest);
7036 
7037  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
7038 
7039  virtual VkResult CheckCorruption(const void* pBlockData);
7040 
7041  virtual void Alloc(
7042  const VmaAllocationRequest& request,
7043  VmaSuballocationType type,
7044  VkDeviceSize allocSize,
7045  VmaAllocation hAllocation);
7046 
7047  virtual void Free(const VmaAllocation allocation);
7048  virtual void FreeAtOffset(VkDeviceSize offset);
7049 
7050 private:
7051  /*
7052  There are two suballocation vectors, used in ping-pong way.
7053  The one with index m_1stVectorIndex is called 1st.
7054  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
7055  2nd can be non-empty only when 1st is not empty.
7056  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
7057  */
7058  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
7059 
7060  enum SECOND_VECTOR_MODE
7061  {
7062  SECOND_VECTOR_EMPTY,
7063  /*
7064  Suballocations in 2nd vector are created later than the ones in 1st, but they
7065  all have smaller offsets.
7066  */
7067  SECOND_VECTOR_RING_BUFFER,
7068  /*
7069  Suballocations in 2nd vector are upper side of double stack.
7070  They all have offsets higher than those in 1st vector.
7071  The top of this stack corresponds to smaller offsets, but higher indices in this vector.
7072  */
7073  SECOND_VECTOR_DOUBLE_STACK,
7074  };
7075 
7076  VkDeviceSize m_SumFreeSize;
7077  SuballocationVectorType m_Suballocations0, m_Suballocations1;
7078  uint32_t m_1stVectorIndex;
7079  SECOND_VECTOR_MODE m_2ndVectorMode;
7080 
7081  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7082  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7083  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7084  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7085 
7086  // Number of items in 1st vector with hAllocation = null at the beginning.
7087  size_t m_1stNullItemsBeginCount;
7088  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
7089  size_t m_1stNullItemsMiddleCount;
7090  // Number of items in 2nd vector with hAllocation = null.
7091  size_t m_2ndNullItemsCount;
7092 
7093  bool ShouldCompact1st() const;
7094  void CleanupAfterFree();
7095 
7096  bool CreateAllocationRequest_LowerAddress(
7097  uint32_t currentFrameIndex,
7098  uint32_t frameInUseCount,
7099  VkDeviceSize bufferImageGranularity,
7100  VkDeviceSize allocSize,
7101  VkDeviceSize allocAlignment,
7102  VmaSuballocationType allocType,
7103  bool canMakeOtherLost,
7104  uint32_t strategy,
7105  VmaAllocationRequest* pAllocationRequest);
7106  bool CreateAllocationRequest_UpperAddress(
7107  uint32_t currentFrameIndex,
7108  uint32_t frameInUseCount,
7109  VkDeviceSize bufferImageGranularity,
7110  VkDeviceSize allocSize,
7111  VkDeviceSize allocAlignment,
7112  VmaSuballocationType allocType,
7113  bool canMakeOtherLost,
7114  uint32_t strategy,
7115  VmaAllocationRequest* pAllocationRequest);
7116 };
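// A minimal standalone sketch of the double-stack layout drawn in the
// SECOND_VECTOR_DOUBLE_STACK diagram above: one cursor grows up from offset 0
// (the 1st vector side) and one grows down from the block size (the 2nd
// vector side), and they must not cross. Alignment and freeing are omitted
// for brevity; names are illustrative, not VMA's internal API.
#include <cstdint>

struct DoubleStackSketch
{
    uint64_t size;     // Total block size.
    uint64_t lowerTop; // First free byte above the lower stack.
    uint64_t upperTop; // First used byte of the upper stack (grows downward).

    explicit DoubleStackSketch(uint64_t blockSize)
        : size(blockSize), lowerTop(0), upperTop(blockSize) {}

    // Allocates from the bottom; returns the offset or UINT64_MAX on failure.
    uint64_t AllocLower(uint64_t allocSize)
    {
        if(allocSize > upperTop - lowerTop)
            return UINT64_MAX;
        const uint64_t offset = lowerTop;
        lowerTop += allocSize;
        return offset;
    }

    // Allocates from the top; returns the offset or UINT64_MAX on failure.
    uint64_t AllocUpper(uint64_t allocSize)
    {
        if(allocSize > upperTop - lowerTop)
            return UINT64_MAX;
        upperTop -= allocSize;
        return upperTop;
    }
};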
7117 
7118 /*
7119 - GetSize() is the original size of allocated memory block.
7120 - m_UsableSize is this size aligned down to a power of two.
7121  All allocations and calculations happen relative to m_UsableSize.
7122 - GetUnusableSize() is the difference between them.
7123  It is reported as separate, unused range, not available for allocations.
7124 
7125 Node at level 0 has size = m_UsableSize.
7126 Each subsequent level contains nodes half the size of those on the previous level.
7127 m_LevelCount is the maximum number of levels to use in the current object.
7128 */
7129 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
7130 {
7131  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
7132 public:
7133  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
7134  virtual ~VmaBlockMetadata_Buddy();
7135  virtual void Init(VkDeviceSize size);
7136 
7137  virtual bool Validate() const;
7138  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
7139  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
7140  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
7141  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
7142 
7143  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
7144  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
7145 
7146 #if VMA_STATS_STRING_ENABLED
7147  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
7148 #endif
7149 
7150  virtual bool CreateAllocationRequest(
7151  uint32_t currentFrameIndex,
7152  uint32_t frameInUseCount,
7153  VkDeviceSize bufferImageGranularity,
7154  VkDeviceSize allocSize,
7155  VkDeviceSize allocAlignment,
7156  bool upperAddress,
7157  VmaSuballocationType allocType,
7158  bool canMakeOtherLost,
7159  uint32_t strategy,
7160  VmaAllocationRequest* pAllocationRequest);
7161 
7162  virtual bool MakeRequestedAllocationsLost(
7163  uint32_t currentFrameIndex,
7164  uint32_t frameInUseCount,
7165  VmaAllocationRequest* pAllocationRequest);
7166 
7167  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
7168 
7169  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
7170 
7171  virtual void Alloc(
7172  const VmaAllocationRequest& request,
7173  VmaSuballocationType type,
7174  VkDeviceSize allocSize,
7175  VmaAllocation hAllocation);
7176 
7177  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
7178  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
7179 
7180 private:
7181  static const VkDeviceSize MIN_NODE_SIZE = 32;
7182  static const size_t MAX_LEVELS = 30;
7183 
7184  struct ValidationContext
7185  {
7186  size_t calculatedAllocationCount;
7187  size_t calculatedFreeCount;
7188  VkDeviceSize calculatedSumFreeSize;
7189 
7190  ValidationContext() :
7191  calculatedAllocationCount(0),
7192  calculatedFreeCount(0),
7193  calculatedSumFreeSize(0) { }
7194  };
7195 
7196  struct Node
7197  {
7198  VkDeviceSize offset;
7199  enum TYPE
7200  {
7201  TYPE_FREE,
7202  TYPE_ALLOCATION,
7203  TYPE_SPLIT,
7204  TYPE_COUNT
7205  } type;
7206  Node* parent;
7207  Node* buddy;
7208 
7209  union
7210  {
7211  struct
7212  {
7213  Node* prev;
7214  Node* next;
7215  } free;
7216  struct
7217  {
7218  VmaAllocation alloc;
7219  } allocation;
7220  struct
7221  {
7222  Node* leftChild;
7223  } split;
7224  };
7225  };
7226 
7227  // Size of the memory block aligned down to a power of two.
7228  VkDeviceSize m_UsableSize;
7229  uint32_t m_LevelCount;
7230 
7231  Node* m_Root;
7232  struct {
7233  Node* front;
7234  Node* back;
7235  } m_FreeList[MAX_LEVELS];
7236  // Number of nodes in the tree with type == TYPE_ALLOCATION.
7237  size_t m_AllocationCount;
7238  // Number of nodes in the tree with type == TYPE_FREE.
7239  size_t m_FreeCount;
7240  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
7241  VkDeviceSize m_SumFreeSize;
7242 
7243  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
7244  void DeleteNode(Node* node);
7245  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
7246  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
7247  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
7248  // The alloc parameter is passed just for validation; it can be null.
7249  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
7250  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
7251  // Adds node to the front of FreeList at given level.
7252  // node->type must be FREE.
7253  // node->free.prev, next can be undefined.
7254  void AddToFreeListFront(uint32_t level, Node* node);
7255  // Removes node from FreeList at given level.
7256  // node->type must be FREE.
7257  // node->free.prev, next stay untouched.
7258  void RemoveFromFreeList(uint32_t level, Node* node);
7259 
7260 #if VMA_STATS_STRING_ENABLED
7261  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
7262 #endif
7263 };
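// A minimal sketch of the level arithmetic described above: node size halves
// at each level (m_UsableSize >> level), so the right level for a request is
// the deepest one whose node size still fits it. Illustrative only; VMA's
// actual AllocSizeToLevel also respects m_LevelCount, and callers check
// overall fit separately (this sketch assumes allocSize <= usableSize).
#include <cstdint>

inline uint64_t LevelToNodeSizeSketch(uint64_t usableSize, uint32_t level)
{
    return usableSize >> level; // Level 0 is the whole (power-of-two) block.
}

inline uint32_t AllocSizeToLevelSketch(uint64_t usableSize, uint64_t allocSize)
{
    uint32_t level = 0;
    // Descend while the next (half-sized) level still fits the request.
    while((usableSize >> (level + 1)) >= allocSize)
        ++level;
    return level;
}
// E.g. with usableSize = 256 and allocSize = 48, the levels hold nodes of
// 256, 128, 64, 32, ... bytes, so the allocation lands on level 2 (64 bytes).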
7264 
7265 /*
7266 Represents a single block of device memory (`VkDeviceMemory`) with all the
7267 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
7268 
7269 Thread-safety: This class must be externally synchronized.
7270 */
7271 class VmaDeviceMemoryBlock
7272 {
7273  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
7274 public:
7275  VmaBlockMetadata* m_pMetadata;
7276 
7277  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
7278 
7279  ~VmaDeviceMemoryBlock()
7280  {
7281  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
7282  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
7283  }
7284 
7285  // Always call after construction.
7286  void Init(
7287  VmaAllocator hAllocator,
7288  VmaPool hParentPool,
7289  uint32_t newMemoryTypeIndex,
7290  VkDeviceMemory newMemory,
7291  VkDeviceSize newSize,
7292  uint32_t id,
7293  uint32_t algorithm);
7294  // Always call before destruction.
7295  void Destroy(VmaAllocator allocator);
7296 
7297  VmaPool GetParentPool() const { return m_hParentPool; }
7298  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
7299  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7300  uint32_t GetId() const { return m_Id; }
7301  void* GetMappedData() const { return m_pMappedData; }
7302 
7303  // Validates all data structures inside this object. If not valid, returns false.
7304  bool Validate() const;
7305 
7306  VkResult CheckCorruption(VmaAllocator hAllocator);
7307 
7308  // ppData can be null.
7309  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
7310  void Unmap(VmaAllocator hAllocator, uint32_t count);
7311 
7312  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
7313  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
7314 
7315  VkResult BindBufferMemory(
7316  const VmaAllocator hAllocator,
7317  const VmaAllocation hAllocation,
7318  VkDeviceSize allocationLocalOffset,
7319  VkBuffer hBuffer,
7320  const void* pNext);
7321  VkResult BindImageMemory(
7322  const VmaAllocator hAllocator,
7323  const VmaAllocation hAllocation,
7324  VkDeviceSize allocationLocalOffset,
7325  VkImage hImage,
7326  const void* pNext);
7327 
7328 private:
7329  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block doesn't belong to a custom pool.
7330  uint32_t m_MemoryTypeIndex;
7331  uint32_t m_Id;
7332  VkDeviceMemory m_hMemory;
7333 
7334  /*
7335  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
7336  Also protects m_MapCount, m_pMappedData.
7337  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
7338  */
7339  VMA_MUTEX m_Mutex;
7340  uint32_t m_MapCount;
7341  void* m_pMappedData;
7342 };
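// A minimal sketch of the reference-counted mapping pattern implied by
// m_MapCount above: vkMapMemory runs only on the 0 -> 1 transition and
// vkUnmapMemory only on the 1 -> 0 transition, so nested Map/Unmap pairs on
// the same block are cheap. This is an illustration of the idea, not VMA's
// implementation (VMA's Map/Unmap also take a count and use VMA_MUTEX);
// it assumes Vulkan prototypes are linked.
#include <cstdint>
#include <mutex>
#include <vulkan/vulkan.h>

struct MappedBlockSketch
{
    VkDevice device = VK_NULL_HANDLE;
    VkDeviceMemory memory = VK_NULL_HANDLE;
    std::mutex mutex;
    uint32_t mapCount = 0;
    void* pMappedData = nullptr;

    VkResult Map(void** ppData) // ppData can be null.
    {
        std::lock_guard<std::mutex> lock(mutex);
        if(mapCount == 0)
        {
            const VkResult res = vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
            if(res != VK_SUCCESS)
                return res;
        }
        ++mapCount;
        if(ppData != nullptr)
            *ppData = pMappedData;
        return VK_SUCCESS;
    }

    void Unmap()
    {
        std::lock_guard<std::mutex> lock(mutex);
        if(mapCount > 0 && --mapCount == 0)
        {
            vkUnmapMemory(device, memory);
            pMappedData = nullptr;
        }
    }
};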
7343 
7344 struct VmaDefragmentationMove
7345 {
7346  size_t srcBlockIndex;
7347  size_t dstBlockIndex;
7348  VkDeviceSize srcOffset;
7349  VkDeviceSize dstOffset;
7350  VkDeviceSize size;
7351  VmaAllocation hAllocation;
7352  VmaDeviceMemoryBlock* pSrcBlock;
7353  VmaDeviceMemoryBlock* pDstBlock;
7354 };
7355 
7356 class VmaDefragmentationAlgorithm;
7357 
7358 /*
7359 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
7360 Vulkan memory type.
7361 
7362 Synchronized internally with a mutex.
7363 */
7364 struct VmaBlockVector
7365 {
7366  VMA_CLASS_NO_COPY(VmaBlockVector)
7367 public:
7368  VmaBlockVector(
7369  VmaAllocator hAllocator,
7370  VmaPool hParentPool,
7371  uint32_t memoryTypeIndex,
7372  VkDeviceSize preferredBlockSize,
7373  size_t minBlockCount,
7374  size_t maxBlockCount,
7375  VkDeviceSize bufferImageGranularity,
7376  uint32_t frameInUseCount,
7377  bool explicitBlockSize,
7378  uint32_t algorithm,
7379  float priority,
7380  VkDeviceSize minAllocationAlignment,
7381  void* pMemoryAllocateNext);
7382  ~VmaBlockVector();
7383 
7384  VkResult CreateMinBlocks();
7385 
7386  VmaAllocator GetAllocator() const { return m_hAllocator; }
7387  VmaPool GetParentPool() const { return m_hParentPool; }
7388  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
7389  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7390  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
7391  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
7392  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
7393  uint32_t GetAlgorithm() const { return m_Algorithm; }
7394 
7395  void GetPoolStats(VmaPoolStats* pStats);
7396 
7397  bool IsEmpty();
7398  bool IsCorruptionDetectionEnabled() const;
7399 
7400  VkResult Allocate(
7401  uint32_t currentFrameIndex,
7402  VkDeviceSize size,
7403  VkDeviceSize alignment,
7404  const VmaAllocationCreateInfo& createInfo,
7405  VmaSuballocationType suballocType,
7406  size_t allocationCount,
7407  VmaAllocation* pAllocations);
7408 
7409  void Free(const VmaAllocation hAllocation);
7410 
7411  // Adds statistics of this BlockVector to pStats.
7412  void AddStats(VmaStats* pStats);
7413 
7414 #if VMA_STATS_STRING_ENABLED
7415  void PrintDetailedMap(class VmaJsonWriter& json);
7416 #endif
7417 
7418  void MakePoolAllocationsLost(
7419  uint32_t currentFrameIndex,
7420  size_t* pLostAllocationCount);
7421  VkResult CheckCorruption();
7422 
7423  // Saves results in pCtx->res.
7424  void Defragment(
7425  class VmaBlockVectorDefragmentationContext* pCtx,
7426  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
7427  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
7428  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
7429  VkCommandBuffer commandBuffer);
7430  void DefragmentationEnd(
7431  class VmaBlockVectorDefragmentationContext* pCtx,
7432  uint32_t flags,
7433  VmaDefragmentationStats* pStats);
7434 
7435  uint32_t ProcessDefragmentations(
7436  class VmaBlockVectorDefragmentationContext *pCtx,
7437  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
7438 
7439  void CommitDefragmentations(
7440  class VmaBlockVectorDefragmentationContext *pCtx,
7441  VmaDefragmentationStats* pStats);
7442 
7443  ////////////////////////////////////////////////////////////////////////////////
7444  // To be used only while the m_Mutex is locked. Used during defragmentation.
7445 
7446  size_t GetBlockCount() const { return m_Blocks.size(); }
7447  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
7448  size_t CalcAllocationCount() const;
7449  bool IsBufferImageGranularityConflictPossible() const;
7450 
7451 private:
7452  friend class VmaDefragmentationAlgorithm_Generic;
7453 
7454  const VmaAllocator m_hAllocator;
7455  const VmaPool m_hParentPool;
7456  const uint32_t m_MemoryTypeIndex;
7457  const VkDeviceSize m_PreferredBlockSize;
7458  const size_t m_MinBlockCount;
7459  const size_t m_MaxBlockCount;
7460  const VkDeviceSize m_BufferImageGranularity;
7461  const uint32_t m_FrameInUseCount;
7462  const bool m_ExplicitBlockSize;
7463  const uint32_t m_Algorithm;
7464  const float m_Priority;
7465  const VkDeviceSize m_MinAllocationAlignment;
7466  void* const m_pMemoryAllocateNext;
7467  VMA_RW_MUTEX m_Mutex;
7468 
7469  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
7470  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
7471  bool m_HasEmptyBlock;
7472  // Incrementally sorted by sumFreeSize, ascending.
7473  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
7474  uint32_t m_NextBlockId;
7475 
7476  VkDeviceSize CalcMaxBlockSize() const;
7477 
7478  // Finds and removes given block from vector.
7479  void Remove(VmaDeviceMemoryBlock* pBlock);
7480 
7481  // Performs a single step in sorting m_Blocks. They may not be fully sorted
7482  // after this call.
7483  void IncrementallySortBlocks();
7484 
7485  VkResult AllocatePage(
7486  uint32_t currentFrameIndex,
7487  VkDeviceSize size,
7488  VkDeviceSize alignment,
7489  const VmaAllocationCreateInfo& createInfo,
7490  VmaSuballocationType suballocType,
7491  VmaAllocation* pAllocation);
7492 
7493  // To be used only without the CAN_MAKE_OTHER_LOST flag.
7494  VkResult AllocateFromBlock(
7495  VmaDeviceMemoryBlock* pBlock,
7496  uint32_t currentFrameIndex,
7497  VkDeviceSize size,
7498  VkDeviceSize alignment,
7499  VmaAllocationCreateFlags allocFlags,
7500  void* pUserData,
7501  VmaSuballocationType suballocType,
7502  uint32_t strategy,
7503  VmaAllocation* pAllocation);
7504 
7505  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
7506 
7507  // Saves result to pCtx->res.
7508  void ApplyDefragmentationMovesCpu(
7509  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7510  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
7511  // Saves result to pCtx->res.
7512  void ApplyDefragmentationMovesGpu(
7513  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7514  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7515  VkCommandBuffer commandBuffer);
7516 
7517  /*
7518  Used during defragmentation. pDefragmentationStats is optional: it's an in/out
7519  parameter, updated with new data.
7520  */
7521  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7522 
7523  void UpdateHasEmptyBlock();
7524 };
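// A minimal sketch of the incremental sorting described for
// IncrementallySortBlocks above: one adjacent-swap (bubble) step per call
// amortizes the sorting cost across allocations, so the container converges
// toward ascending order (by sumFreeSize, per the m_Blocks comment) without
// a full sort on the hot path. Illustrative only, using a plain std::vector
// instead of VmaVector.
#include <utility>
#include <vector>

template<typename T, typename LessFn>
void IncrementalSortStepSketch(std::vector<T>& items, LessFn less)
{
    // Perform at most one swap of an out-of-order adjacent pair.
    for(size_t i = 1; i < items.size(); ++i)
    {
        if(less(items[i], items[i - 1]))
        {
            std::swap(items[i], items[i - 1]);
            return;
        }
    }
}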
7525 
7526 struct VmaPool_T
7527 {
7528  VMA_CLASS_NO_COPY(VmaPool_T)
7529 public:
7530  VmaBlockVector m_BlockVector;
7531 
7532  VmaPool_T(
7533  VmaAllocator hAllocator,
7534  const VmaPoolCreateInfo& createInfo,
7535  VkDeviceSize preferredBlockSize);
7536  ~VmaPool_T();
7537 
7538  uint32_t GetId() const { return m_Id; }
7539  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
7540 
7541  const char* GetName() const { return m_Name; }
7542  void SetName(const char* pName);
7543 
7544 #if VMA_STATS_STRING_ENABLED
7545  //void PrintDetailedMap(class VmaStringBuilder& sb);
7546 #endif
7547 
7548 private:
7549  uint32_t m_Id;
7550  char* m_Name;
7551  VmaPool_T* m_PrevPool = VMA_NULL;
7552  VmaPool_T* m_NextPool = VMA_NULL;
7553  friend struct VmaPoolListItemTraits;
7554 };
7555 
7556 struct VmaPoolListItemTraits
7557 {
7558  typedef VmaPool_T ItemType;
7559  static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
7560  static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
7561  static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
7562  static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
7563 };
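// A minimal sketch of how a traits-driven intrusive list (such as
// VmaIntrusiveLinkedList used with VmaPoolListItemTraits above) walks its
// items: the list stores no nodes of its own, it simply follows the
// prev/next pointers that the traits expose on the item type. The function
// below is illustrative, not VMA's API.
template<typename Traits, typename Fn>
void ForEachListItemSketch(typename Traits::ItemType* front, Fn fn)
{
    for(typename Traits::ItemType* item = front; item != nullptr;
        item = Traits::GetNext(item))
    {
        fn(item);
    }
}
// Usage sketch: ForEachListItemSketch<VmaPoolListItemTraits>(frontPool,
//     [](VmaPool_T* pool) { /* inspect pool */ });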
7564 
7565 /*
7566 Performs defragmentation:
7567 
7568 - Updates `pBlockVector->m_pMetadata`.
7569 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7570 - Does not move actual data, only returns requested moves as `moves`.
7571 */
7572 class VmaDefragmentationAlgorithm
7573 {
7574  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7575 public:
7576  VmaDefragmentationAlgorithm(
7577  VmaAllocator hAllocator,
7578  VmaBlockVector* pBlockVector,
7579  uint32_t currentFrameIndex) :
7580  m_hAllocator(hAllocator),
7581  m_pBlockVector(pBlockVector),
7582  m_CurrentFrameIndex(currentFrameIndex)
7583  {
7584  }
7585  virtual ~VmaDefragmentationAlgorithm()
7586  {
7587  }
7588 
7589  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7590  virtual void AddAll() = 0;
7591 
7592  virtual VkResult Defragment(
7593  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7594  VkDeviceSize maxBytesToMove,
7595  uint32_t maxAllocationsToMove,
7596  VmaDefragmentationFlags flags) = 0;
7597 
7598  virtual VkDeviceSize GetBytesMoved() const = 0;
7599  virtual uint32_t GetAllocationsMoved() const = 0;
7600 
7601 protected:
7602  VmaAllocator const m_hAllocator;
7603  VmaBlockVector* const m_pBlockVector;
7604  const uint32_t m_CurrentFrameIndex;
7605 
7606  struct AllocationInfo
7607  {
7608  VmaAllocation m_hAllocation;
7609  VkBool32* m_pChanged;
7610 
7611  AllocationInfo() :
7612  m_hAllocation(VK_NULL_HANDLE),
7613  m_pChanged(VMA_NULL)
7614  {
7615  }
7616  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7617  m_hAllocation(hAlloc),
7618  m_pChanged(pChanged)
7619  {
7620  }
7621  };
7622 };
7623 
7624 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7625 {
7626  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7627 public:
7628  VmaDefragmentationAlgorithm_Generic(
7629  VmaAllocator hAllocator,
7630  VmaBlockVector* pBlockVector,
7631  uint32_t currentFrameIndex,
7632  bool overlappingMoveSupported);
7633  virtual ~VmaDefragmentationAlgorithm_Generic();
7634 
7635  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7636  virtual void AddAll() { m_AllAllocations = true; }
7637 
7638  virtual VkResult Defragment(
7639  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7640  VkDeviceSize maxBytesToMove,
7641  uint32_t maxAllocationsToMove,
7642  VmaDefragmentationFlags flags);
7643 
7644  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7645  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7646 
7647 private:
7648  uint32_t m_AllocationCount;
7649  bool m_AllAllocations;
7650 
7651  VkDeviceSize m_BytesMoved;
7652  uint32_t m_AllocationsMoved;
7653 
7654  struct AllocationInfoSizeGreater
7655  {
7656  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7657  {
7658  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7659  }
7660  };
7661 
7662  struct AllocationInfoOffsetGreater
7663  {
7664  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7665  {
7666  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7667  }
7668  };
7669 
7670  struct BlockInfo
7671  {
7672  size_t m_OriginalBlockIndex;
7673  VmaDeviceMemoryBlock* m_pBlock;
7674  bool m_HasNonMovableAllocations;
7675  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7676 
7677  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7678  m_OriginalBlockIndex(SIZE_MAX),
7679  m_pBlock(VMA_NULL),
7680  m_HasNonMovableAllocations(true),
7681  m_Allocations(pAllocationCallbacks)
7682  {
7683  }
7684 
7685  void CalcHasNonMovableAllocations()
7686  {
7687  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7688  const size_t defragmentAllocCount = m_Allocations.size();
7689  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7690  }
7691 
7692  void SortAllocationsBySizeDescending()
7693  {
7694  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7695  }
7696 
7697  void SortAllocationsByOffsetDescending()
7698  {
7699  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7700  }
7701  };
7702 
7703  struct BlockPointerLess
7704  {
7705  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7706  {
7707  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7708  }
7709  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7710  {
7711  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7712  }
7713  };
7714 
7715  // 1. Blocks with some non-movable allocations go first.
7716  // 2. Blocks with smaller sumFreeSize go first.
7717  struct BlockInfoCompareMoveDestination
7718  {
7719  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7720  {
7721  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7722  {
7723  return true;
7724  }
7725  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7726  {
7727  return false;
7728  }
7729  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7730  {
7731  return true;
7732  }
7733  return false;
7734  }
7735  };
7736 
7737  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7738  BlockInfoVector m_Blocks;
7739 
7740  VkResult DefragmentRound(
7741  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7742  VkDeviceSize maxBytesToMove,
7743  uint32_t maxAllocationsToMove,
7744  bool freeOldAllocations);
7745 
7746  size_t CalcBlocksWithNonMovableCount() const;
7747 
7748  static bool MoveMakesSense(
7749  size_t dstBlockIndex, VkDeviceSize dstOffset,
7750  size_t srcBlockIndex, VkDeviceSize srcOffset);
7751 };
7752 
7753 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7754 {
7755  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7756 public:
7757  VmaDefragmentationAlgorithm_Fast(
7758  VmaAllocator hAllocator,
7759  VmaBlockVector* pBlockVector,
7760  uint32_t currentFrameIndex,
7761  bool overlappingMoveSupported);
7762  virtual ~VmaDefragmentationAlgorithm_Fast();
7763 
7764  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7765  virtual void AddAll() { m_AllAllocations = true; }
7766 
7767  virtual VkResult Defragment(
7768  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7769  VkDeviceSize maxBytesToMove,
7770  uint32_t maxAllocationsToMove,
7771  VmaDefragmentationFlags flags);
7772 
7773  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7774  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7775 
7776 private:
7777  struct BlockInfo
7778  {
7779  size_t origBlockIndex;
7780  };
7781 
7782  class FreeSpaceDatabase
7783  {
7784  public:
7785  FreeSpaceDatabase()
7786  {
7787  FreeSpace s = {};
7788  s.blockInfoIndex = SIZE_MAX;
7789  for(size_t i = 0; i < MAX_COUNT; ++i)
7790  {
7791  m_FreeSpaces[i] = s;
7792  }
7793  }
7794 
7795  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7796  {
7797  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7798  {
7799  return;
7800  }
7801 
7802  // Find the first invalid structure or, failing that, the smallest one smaller than the new space.
7803  size_t bestIndex = SIZE_MAX;
7804  for(size_t i = 0; i < MAX_COUNT; ++i)
7805  {
7806  // Empty structure.
7807  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7808  {
7809  bestIndex = i;
7810  break;
7811  }
7812  if(m_FreeSpaces[i].size < size &&
7813  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7814  {
7815  bestIndex = i;
7816  }
7817  }
7818 
7819  if(bestIndex != SIZE_MAX)
7820  {
7821  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7822  m_FreeSpaces[bestIndex].offset = offset;
7823  m_FreeSpaces[bestIndex].size = size;
7824  }
7825  }
7826 
7827  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7828  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7829  {
7830  size_t bestIndex = SIZE_MAX;
7831  VkDeviceSize bestFreeSpaceAfter = 0;
7832  for(size_t i = 0; i < MAX_COUNT; ++i)
7833  {
7834  // Structure is valid.
7835  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7836  {
7837  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7838  // Allocation fits into this structure.
7839  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7840  {
7841  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7842  (dstOffset + size);
7843  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7844  {
7845  bestIndex = i;
7846  bestFreeSpaceAfter = freeSpaceAfter;
7847  }
7848  }
7849  }
7850  }
7851 
7852  if(bestIndex != SIZE_MAX)
7853  {
7854  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7855  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7856 
7857  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7858  {
7859  // Leave this structure for remaining empty space.
7860  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7861  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7862  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7863  }
7864  else
7865  {
7866  // This structure becomes invalid.
7867  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7868  }
7869 
7870  return true;
7871  }
7872 
7873  return false;
7874  }
7875 
7876  private:
7877  static const size_t MAX_COUNT = 4;
7878 
7879  struct FreeSpace
7880  {
7881  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7882  VkDeviceSize offset;
7883  VkDeviceSize size;
7884  } m_FreeSpaces[MAX_COUNT];
7885  };
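    // A usage sketch for the FreeSpaceDatabase above: defragmentation first
    // registers holes it discovers, then fetches a hole that fits an
    // allocation being moved. The values are made up for illustration and
    // assume 4096 >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER:
    //
    //   FreeSpaceDatabase db;
    //   db.Register(/*blockInfoIndex=*/0, /*offset=*/256, /*size=*/4096);
    //   size_t dstBlockInfo; VkDeviceSize dstOffset;
    //   if(db.Fetch(/*alignment=*/64, /*size=*/1024, dstBlockInfo, dstOffset))
    //   {
    //       // dstBlockInfo == 0 and dstOffset == 256; the remaining 3072
    //       // bytes stay registered for subsequent fetches.
    //   }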
7886 
7887  const bool m_OverlappingMoveSupported;
7888 
7889  uint32_t m_AllocationCount;
7890  bool m_AllAllocations;
7891 
7892  VkDeviceSize m_BytesMoved;
7893  uint32_t m_AllocationsMoved;
7894 
7895  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7896 
7897  void PreprocessMetadata();
7898  void PostprocessMetadata();
7899  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7900 };
7901 
7902 struct VmaBlockDefragmentationContext
7903 {
7904  enum BLOCK_FLAG
7905  {
7906  BLOCK_FLAG_USED = 0x00000001,
7907  };
7908  uint32_t flags;
7909  VkBuffer hBuffer;
7910 };
7911 
7912 class VmaBlockVectorDefragmentationContext
7913 {
7914  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7915 public:
7916  VkResult res;
7917  bool mutexLocked;
7918  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7919  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7920  uint32_t defragmentationMovesProcessed;
7921  uint32_t defragmentationMovesCommitted;
7922  bool hasDefragmentationPlan;
7923 
7924  VmaBlockVectorDefragmentationContext(
7925  VmaAllocator hAllocator,
7926  VmaPool hCustomPool, // Optional.
7927  VmaBlockVector* pBlockVector,
7928  uint32_t currFrameIndex);
7929  ~VmaBlockVectorDefragmentationContext();
7930 
7931  VmaPool GetCustomPool() const { return m_hCustomPool; }
7932  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7933  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7934 
7935  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7936  void AddAll() { m_AllAllocations = true; }
7937 
7938  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7939 
7940 private:
7941  const VmaAllocator m_hAllocator;
7942  // Null if not from a custom pool.
7943  const VmaPool m_hCustomPool;
7944  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7945  VmaBlockVector* const m_pBlockVector;
7946  const uint32_t m_CurrFrameIndex;
7947  // Owner of this object.
7948  VmaDefragmentationAlgorithm* m_pAlgorithm;
7949 
7950  struct AllocInfo
7951  {
7952  VmaAllocation hAlloc;
7953  VkBool32* pChanged;
7954  };
7955  // Used between constructor and Begin.
7956  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7957  bool m_AllAllocations;
7958 };
7959 
7960 struct VmaDefragmentationContext_T
7961 {
7962 private:
7963  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7964 public:
7965  VmaDefragmentationContext_T(
7966  VmaAllocator hAllocator,
7967  uint32_t currFrameIndex,
7968  uint32_t flags,
7969  VmaDefragmentationStats* pStats);
7970  ~VmaDefragmentationContext_T();
7971 
7972  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7973  void AddAllocations(
7974  uint32_t allocationCount,
7975  const VmaAllocation* pAllocations,
7976  VkBool32* pAllocationsChanged);
7977 
7978  /*
7979  Returns:
7980  - `VK_SUCCESS` if it succeeded and the object can be destroyed immediately.
7981  - `VK_NOT_READY` if it succeeded but the object must remain alive until vmaDefragmentationEnd().
7982  - Negative value if an error occurred and the object can be destroyed immediately.
7983  */
7984  VkResult Defragment(
7985  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7986  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7987  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7988 
7989  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7990  VkResult DefragmentPassEnd();
7991 
7992 private:
7993  const VmaAllocator m_hAllocator;
7994  const uint32_t m_CurrFrameIndex;
7995  const uint32_t m_Flags;
7996  VmaDefragmentationStats* const m_pStats;
7997 
7998  VkDeviceSize m_MaxCpuBytesToMove;
7999  uint32_t m_MaxCpuAllocationsToMove;
8000  VkDeviceSize m_MaxGpuBytesToMove;
8001  uint32_t m_MaxGpuAllocationsToMove;
8002 
8003  // Owner of these objects.
8004  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
8005  // Owner of these objects.
8006  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
8007 };
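// A caller-side sketch of the return-value protocol documented for
// Defragment() above, using the public functions that wrap it. This is
// application code, not part of this header; error handling is minimal and
// the caller is assumed to fill `info` and submit/wait on any recorded GPU
// work before ending the context.
VkResult RunDefragmentationSketch(VmaAllocator allocator, const VmaDefragmentationInfo2& info)
{
    VmaDefragmentationStats stats = {};
    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
    if(res == VK_NOT_READY)
    {
        // Succeeded, but moves are still pending (e.g. recorded on a command
        // buffer): keep the context alive, submit and wait, then end it.
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    else if(res == VK_SUCCESS)
    {
        // Finished immediately; the context can be ended right away.
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    // A negative res means an error occurred; ctx can also be ended immediately.
    return res;
}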
8008 
8009 #if VMA_RECORDING_ENABLED
8010 
8011 class VmaRecorder
8012 {
8013 public:
8014  VmaRecorder();
8015  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
8016  void WriteConfiguration(
8017  const VkPhysicalDeviceProperties& devProps,
8018  const VkPhysicalDeviceMemoryProperties& memProps,
8019  uint32_t vulkanApiVersion,
8020  bool dedicatedAllocationExtensionEnabled,
8021  bool bindMemory2ExtensionEnabled,
8022  bool memoryBudgetExtensionEnabled,
8023  bool deviceCoherentMemoryExtensionEnabled);
8024  ~VmaRecorder();
8025 
8026  void RecordCreateAllocator(uint32_t frameIndex);
8027  void RecordDestroyAllocator(uint32_t frameIndex);
8028  void RecordCreatePool(uint32_t frameIndex,
8029  const VmaPoolCreateInfo& createInfo,
8030  VmaPool pool);
8031  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
8032  void RecordAllocateMemory(uint32_t frameIndex,
8033  const VkMemoryRequirements& vkMemReq,
8034  const VmaAllocationCreateInfo& createInfo,
8035  VmaAllocation allocation);
8036  void RecordAllocateMemoryPages(uint32_t frameIndex,
8037  const VkMemoryRequirements& vkMemReq,
8038  const VmaAllocationCreateInfo& createInfo,
8039  uint64_t allocationCount,
8040  const VmaAllocation* pAllocations);
8041  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
8042  const VkMemoryRequirements& vkMemReq,
8043  bool requiresDedicatedAllocation,
8044  bool prefersDedicatedAllocation,
8045  const VmaAllocationCreateInfo& createInfo,
8046  VmaAllocation allocation);
8047  void RecordAllocateMemoryForImage(uint32_t frameIndex,
8048  const VkMemoryRequirements& vkMemReq,
8049  bool requiresDedicatedAllocation,
8050  bool prefersDedicatedAllocation,
8051  const VmaAllocationCreateInfo& createInfo,
8052  VmaAllocation allocation);
8053  void RecordFreeMemory(uint32_t frameIndex,
8054  VmaAllocation allocation);
8055  void RecordFreeMemoryPages(uint32_t frameIndex,
8056  uint64_t allocationCount,
8057  const VmaAllocation* pAllocations);
8058  void RecordSetAllocationUserData(uint32_t frameIndex,
8059  VmaAllocation allocation,
8060  const void* pUserData);
8061  void RecordCreateLostAllocation(uint32_t frameIndex,
8062  VmaAllocation allocation);
8063  void RecordMapMemory(uint32_t frameIndex,
8064  VmaAllocation allocation);
8065  void RecordUnmapMemory(uint32_t frameIndex,
8066  VmaAllocation allocation);
8067  void RecordFlushAllocation(uint32_t frameIndex,
8068  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
8069  void RecordInvalidateAllocation(uint32_t frameIndex,
8070  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
8071  void RecordCreateBuffer(uint32_t frameIndex,
8072  const VkBufferCreateInfo& bufCreateInfo,
8073  const VmaAllocationCreateInfo& allocCreateInfo,
8074  VmaAllocation allocation);
8075  void RecordCreateImage(uint32_t frameIndex,
8076  const VkImageCreateInfo& imageCreateInfo,
8077  const VmaAllocationCreateInfo& allocCreateInfo,
8078  VmaAllocation allocation);
8079  void RecordDestroyBuffer(uint32_t frameIndex,
8080  VmaAllocation allocation);
8081  void RecordDestroyImage(uint32_t frameIndex,
8082  VmaAllocation allocation);
8083  void RecordTouchAllocation(uint32_t frameIndex,
8084  VmaAllocation allocation);
8085  void RecordGetAllocationInfo(uint32_t frameIndex,
8086  VmaAllocation allocation);
8087  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
8088  VmaPool pool);
8089  void RecordDefragmentationBegin(uint32_t frameIndex,
8090  const VmaDefragmentationInfo2& info,
8091  VmaDefragmentationContext ctx);
8092  void RecordDefragmentationEnd(uint32_t frameIndex,
8093  VmaDefragmentationContext ctx);
8094  void RecordSetPoolName(uint32_t frameIndex,
8095  VmaPool pool,
8096  const char* name);
8097 
8098 private:
8099  struct CallParams
8100  {
8101  uint32_t threadId;
8102  double time;
8103  };
8104 
8105  class UserDataString
8106  {
8107  public:
8108  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
8109  const char* GetString() const { return m_Str; }
8110 
8111  private:
8112  char m_PtrStr[17];
8113  const char* m_Str;
8114  };
8115 
8116  bool m_UseMutex;
8117  VmaRecordFlags m_Flags;
8118  FILE* m_File;
8119  VMA_MUTEX m_FileMutex;
8120  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
8121 
8122  void GetBasicParams(CallParams& outParams);
8123 
8124  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
8125  template<typename T>
8126  void PrintPointerList(uint64_t count, const T* pItems)
8127  {
8128  if(count)
8129  {
8130  fprintf(m_File, "%p", pItems[0]);
8131  for(uint64_t i = 1; i < count; ++i)
8132  {
8133  fprintf(m_File, " %p", pItems[i]);
8134  }
8135  }
8136  }
8137 
8138  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
8139  void Flush();
8140 };
8141 
8142 #endif // #if VMA_RECORDING_ENABLED
8143 
8144 /*
8145 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
8146 */
8147 class VmaAllocationObjectAllocator
8148 {
8149  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
8150 public:
8151  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
8152 
8153  template<typename... Types> VmaAllocation Allocate(Types... args);
8154  void Free(VmaAllocation hAlloc);
8155 
8156 private:
8157  VMA_MUTEX m_Mutex;
8158  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
8159 };
8160 
8161 struct VmaCurrentBudgetData
8162 {
8163  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
8164  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
8165 
8166 #if VMA_MEMORY_BUDGET
8167  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
8168  VMA_RW_MUTEX m_BudgetMutex;
8169  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
8170  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
8171  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
8172 #endif // #if VMA_MEMORY_BUDGET
8173 
8174  VmaCurrentBudgetData()
8175  {
8176  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
8177  {
8178  m_BlockBytes[heapIndex] = 0;
8179  m_AllocationBytes[heapIndex] = 0;
8180 #if VMA_MEMORY_BUDGET
8181  m_VulkanUsage[heapIndex] = 0;
8182  m_VulkanBudget[heapIndex] = 0;
8183  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
8184 #endif
8185  }
8186 
8187 #if VMA_MEMORY_BUDGET
8188  m_OperationsSinceBudgetFetch = 0;
8189 #endif
8190  }
8191 
8192  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
8193  {
8194  m_AllocationBytes[heapIndex] += allocationSize;
8195 #if VMA_MEMORY_BUDGET
8196  ++m_OperationsSinceBudgetFetch;
8197 #endif
8198  }
8199 
8200  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
8201  {
8202  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // Sanity check: cannot remove more bytes than were added.
8203  m_AllocationBytes[heapIndex] -= allocationSize;
8204 #if VMA_MEMORY_BUDGET
8205  ++m_OperationsSinceBudgetFetch;
8206 #endif
8207  }
8208 };
8209 
8210 // Main allocator object.
8211 struct VmaAllocator_T
8212 {
8213  VMA_CLASS_NO_COPY(VmaAllocator_T)
8214 public:
8215  bool m_UseMutex;
8216  uint32_t m_VulkanApiVersion;
8217  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
8218  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
8219  bool m_UseExtMemoryBudget;
8220  bool m_UseAmdDeviceCoherentMemory;
8221  bool m_UseKhrBufferDeviceAddress;
8222  bool m_UseExtMemoryPriority;
8223  VkDevice m_hDevice;
8224  VkInstance m_hInstance;
8225  bool m_AllocationCallbacksSpecified;
8226  VkAllocationCallbacks m_AllocationCallbacks;
8227  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
8228  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
8229 
8230  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the heap size can be allocated from it.
8231  uint32_t m_HeapSizeLimitMask;
8232 
8233  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
8234  VkPhysicalDeviceMemoryProperties m_MemProps;
8235 
8236  // Default pools.
8237  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
8238 
8239  typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
8240  DedicatedAllocationLinkedList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
8241  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
8242 
8243  VmaCurrentBudgetData m_Budget;
8244  VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
8245 
8246  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
8247  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
8248  ~VmaAllocator_T();
8249 
8250  const VkAllocationCallbacks* GetAllocationCallbacks() const
8251  {
8252  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
8253  }
8254  const VmaVulkanFunctions& GetVulkanFunctions() const
8255  {
8256  return m_VulkanFunctions;
8257  }
8258 
8259  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
8260 
8261  VkDeviceSize GetBufferImageGranularity() const
8262  {
8263  return VMA_MAX(
8264  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
8265  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
8266  }
8267 
8268  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
8269  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
8270 
8271  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
8272  {
8273  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
8274  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
8275  }
8276  // True when the specified memory type is HOST_VISIBLE but not HOST_COHERENT.
8277  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
8278  {
8279  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
8280  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8281  }
8282  // Minimum alignment for all allocations in the specified memory type.
8283  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
8284  {
8285  return IsMemoryTypeNonCoherent(memTypeIndex) ?
8286  VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
8287  (VkDeviceSize)VMA_MIN_ALIGNMENT;
8288  }
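    // A sketch of why the non-coherent case above raises the minimum
    // alignment: flush/invalidate ranges for memory that is HOST_VISIBLE but
    // not HOST_COHERENT must be aligned to
    // VkPhysicalDeviceLimits::nonCoherentAtomSize. Illustrative only (the
    // names below are not VMA's API); extracted, the range preparation for
    // vkFlushMappedMemoryRanges would look like:
    //
    //   VkDeviceSize begin = offset / atomSize * atomSize;           // Align down.
    //   VkDeviceSize end = (offset + size + atomSize - 1)
    //       / atomSize * atomSize;                                   // Align up.
    //   if(end > memorySize) end = memorySize;                       // Clamp.
    //   range.offset = begin;
    //   range.size = end - begin;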
8289 
8290  bool IsIntegratedGpu() const
8291  {
8292  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
8293  }
8294 
8295  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
8296 
8297 #if VMA_RECORDING_ENABLED
8298  VmaRecorder* GetRecorder() const { return m_pRecorder; }
8299 #endif
8300 
8301  void GetBufferMemoryRequirements(
8302  VkBuffer hBuffer,
8303  VkMemoryRequirements& memReq,
8304  bool& requiresDedicatedAllocation,
8305  bool& prefersDedicatedAllocation) const;
8306  void GetImageMemoryRequirements(
8307  VkImage hImage,
8308  VkMemoryRequirements& memReq,
8309  bool& requiresDedicatedAllocation,
8310  bool& prefersDedicatedAllocation) const;
8311 
8312  // Main allocation function.
8313  VkResult AllocateMemory(
8314  const VkMemoryRequirements& vkMemReq,
8315  bool requiresDedicatedAllocation,
8316  bool prefersDedicatedAllocation,
8317  VkBuffer dedicatedBuffer,
8318  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
8319  VkImage dedicatedImage,
8320  const VmaAllocationCreateInfo& createInfo,
8321  VmaSuballocationType suballocType,
8322  size_t allocationCount,
8323  VmaAllocation* pAllocations);
8324 
8325  // Main deallocation function.
8326  void FreeMemory(
8327  size_t allocationCount,
8328  const VmaAllocation* pAllocations);
8329 
8330  void CalculateStats(VmaStats* pStats);
8331 
8332  void GetBudget(
8333  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
8334 
8335 #if VMA_STATS_STRING_ENABLED
8336  void PrintDetailedMap(class VmaJsonWriter& json);
8337 #endif
8338 
8339  VkResult DefragmentationBegin(
8340  const VmaDefragmentationInfo2& info,
8341  VmaDefragmentationStats* pStats,
8342  VmaDefragmentationContext* pContext);
8343  VkResult DefragmentationEnd(
8344  VmaDefragmentationContext context);
8345 
8346  VkResult DefragmentationPassBegin(
8347  VmaDefragmentationPassInfo* pInfo,
8348  VmaDefragmentationContext context);
8349  VkResult DefragmentationPassEnd(
8350  VmaDefragmentationContext context);
8351 
8352  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
8353  bool TouchAllocation(VmaAllocation hAllocation);
8354 
8355  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
8356  void DestroyPool(VmaPool pool);
8357  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
8358 
8359  void SetCurrentFrameIndex(uint32_t frameIndex);
8360  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
8361 
8362  void MakePoolAllocationsLost(
8363  VmaPool hPool,
8364  size_t* pLostAllocationCount);
8365  VkResult CheckPoolCorruption(VmaPool hPool);
8366  VkResult CheckCorruption(uint32_t memoryTypeBits);
8367 
8368  void CreateLostAllocation(VmaAllocation* pAllocation);
8369 
8370  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
8371  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
8372  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
8373  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
8374  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
8375  VkResult BindVulkanBuffer(
8376  VkDeviceMemory memory,
8377  VkDeviceSize memoryOffset,
8378  VkBuffer buffer,
8379  const void* pNext);
8380  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
8381  VkResult BindVulkanImage(
8382  VkDeviceMemory memory,
8383  VkDeviceSize memoryOffset,
8384  VkImage image,
8385  const void* pNext);
8386 
8387  VkResult Map(VmaAllocation hAllocation, void** ppData);
8388  void Unmap(VmaAllocation hAllocation);
8389 
8390  VkResult BindBufferMemory(
8391  VmaAllocation hAllocation,
8392  VkDeviceSize allocationLocalOffset,
8393  VkBuffer hBuffer,
8394  const void* pNext);
8395  VkResult BindImageMemory(
8396  VmaAllocation hAllocation,
8397  VkDeviceSize allocationLocalOffset,
8398  VkImage hImage,
8399  const void* pNext);
8400 
8401  VkResult FlushOrInvalidateAllocation(
8402  VmaAllocation hAllocation,
8403  VkDeviceSize offset, VkDeviceSize size,
8404  VMA_CACHE_OPERATION op);
8405  VkResult FlushOrInvalidateAllocations(
8406  uint32_t allocationCount,
8407  const VmaAllocation* allocations,
8408  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
8409  VMA_CACHE_OPERATION op);
8410 
8411  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
8412 
8413  /*
8414  Returns a bit mask of memory types that can support defragmentation on the GPU,
8415  i.e. those that support creation of the buffer required for copy operations.
8416  */
8417  uint32_t GetGpuDefragmentationMemoryTypeBits();
8418 
8419 #if VMA_EXTERNAL_MEMORY
8420  VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
8421  {
8422  return m_TypeExternalMemoryHandleTypes[memTypeIndex];
8423  }
8424 #endif // #if VMA_EXTERNAL_MEMORY
8425 
8426 private:
8427  VkDeviceSize m_PreferredLargeHeapBlockSize;
8428 
8429  VkPhysicalDevice m_PhysicalDevice;
8430  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
8431  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
8432 #if VMA_EXTERNAL_MEMORY
8433  VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
8434 #endif // #if VMA_EXTERNAL_MEMORY
8435 
8436  VMA_RW_MUTEX m_PoolsMutex;
8437  typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
8438  // Protected by m_PoolsMutex.
8439  PoolList m_Pools;
8440  uint32_t m_NextPoolId;
8441 
8442  VmaVulkanFunctions m_VulkanFunctions;
8443 
8444  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
8445  uint32_t m_GlobalMemoryTypeBits;
8446 
8447 #if VMA_RECORDING_ENABLED
8448  VmaRecorder* m_pRecorder;
8449 #endif
8450 
8451  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
8452 
8453 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8454  void ImportVulkanFunctions_Static();
8455 #endif
8456 
8457  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
8458 
8459 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
8460  void ImportVulkanFunctions_Dynamic();
8461 #endif
8462 
8463  void ValidateVulkanFunctions();
8464 
8465  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
8466 
8467  VkResult AllocateMemoryOfType(
8468  VkDeviceSize size,
8469  VkDeviceSize alignment,
8470  bool dedicatedAllocation,
8471  VkBuffer dedicatedBuffer,
8472  VkBufferUsageFlags dedicatedBufferUsage,
8473  VkImage dedicatedImage,
8474  const VmaAllocationCreateInfo& createInfo,
8475  uint32_t memTypeIndex,
8476  VmaSuballocationType suballocType,
8477  size_t allocationCount,
8478  VmaAllocation* pAllocations);
8479 
8480  // Helper function only to be used inside AllocateDedicatedMemory.
8481  VkResult AllocateDedicatedMemoryPage(
8482  VkDeviceSize size,
8483  VmaSuballocationType suballocType,
8484  uint32_t memTypeIndex,
8485  const VkMemoryAllocateInfo& allocInfo,
8486  bool map,
8487  bool isUserDataString,
8488  void* pUserData,
8489  VmaAllocation* pAllocation);
8490 
8491  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
8492  VkResult AllocateDedicatedMemory(
8493  VkDeviceSize size,
8494  VmaSuballocationType suballocType,
8495  uint32_t memTypeIndex,
8496  bool withinBudget,
8497  bool map,
8498  bool isUserDataString,
8499  void* pUserData,
8500  float priority,
8501  VkBuffer dedicatedBuffer,
8502  VkBufferUsageFlags dedicatedBufferUsage,
8503  VkImage dedicatedImage,
8504  size_t allocationCount,
8505  VmaAllocation* pAllocations);
8506 
8507  void FreeDedicatedMemory(const VmaAllocation allocation);
8508 
8509  /*
8510  Calculates and returns a bit mask of memory types that can support defragmentation
8511  on the GPU, i.e. those that support creation of the buffer required for copy operations.
8512  */
8513  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
8514 
8515  uint32_t CalculateGlobalMemoryTypeBits() const;
8516 
8517  bool GetFlushOrInvalidateRange(
8518  VmaAllocation allocation,
8519  VkDeviceSize offset, VkDeviceSize size,
8520  VkMappedMemoryRange& outRange) const;
8521 
8522 #if VMA_MEMORY_BUDGET
8523  void UpdateVulkanBudget();
8524 #endif // #if VMA_MEMORY_BUDGET
8525 };
8526 
8527 ////////////////////////////////////////////////////////////////////////////////
8528 // Memory allocation #2 after VmaAllocator_T definition
8529 
8530 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
8531 {
8532  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
8533 }
8534 
8535 static void VmaFree(VmaAllocator hAllocator, void* ptr)
8536 {
8537  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
8538 }
8539 
8540 template<typename T>
8541 static T* VmaAllocate(VmaAllocator hAllocator)
8542 {
8543  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
8544 }
8545 
8546 template<typename T>
8547 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
8548 {
8549  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
8550 }
8551 
8552 template<typename T>
8553 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8554 {
8555  if(ptr != VMA_NULL)
8556  {
8557  ptr->~T();
8558  VmaFree(hAllocator, ptr);
8559  }
8560 }
8561 
8562 template<typename T>
8563 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
8564 {
8565  if(ptr != VMA_NULL)
8566  {
8567  for(size_t i = count; i--; )
8568  ptr[i].~T();
8569  VmaFree(hAllocator, ptr);
8570  }
8571 }
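// Illustrative use of the helpers above (a hedged sketch, not part of the
// library; `MyObject` is a hypothetical type). VmaAllocate<T> returns raw,
// suitably aligned storage, so the object is constructed with placement-new
// and destroyed through vma_delete(), keeping both paths on the same
// VkAllocationCallbacks:
//
//   MyObject* obj = new(VmaAllocate<MyObject>(hAllocator)) MyObject();
//   vma_delete(hAllocator, obj);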
8572 
8573 ////////////////////////////////////////////////////////////////////////////////
8574 // VmaStringBuilder
8575 
8576 #if VMA_STATS_STRING_ENABLED
8577 
8578 class VmaStringBuilder
8579 {
8580 public:
8581  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8582  size_t GetLength() const { return m_Data.size(); }
8583  const char* GetData() const { return m_Data.data(); }
8584 
8585  void Add(char ch) { m_Data.push_back(ch); }
8586  void Add(const char* pStr);
8587  void AddNewLine() { Add('\n'); }
8588  void AddNumber(uint32_t num);
8589  void AddNumber(uint64_t num);
8590  void AddPointer(const void* ptr);
8591 
8592 private:
8593  VmaVector< char, VmaStlAllocator<char> > m_Data;
8594 };
8595 
8596 void VmaStringBuilder::Add(const char* pStr)
8597 {
8598  const size_t strLen = strlen(pStr);
8599  if(strLen > 0)
8600  {
8601  const size_t oldCount = m_Data.size();
8602  m_Data.resize(oldCount + strLen);
8603  memcpy(m_Data.data() + oldCount, pStr, strLen);
8604  }
8605 }
8606 
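// Both AddNumber overloads below format the value backwards into a fixed
// stack buffer sized for the worst case: at most 10 decimal digits for
// uint32_t and 20 for uint64_t, plus the terminating NUL.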
8607 void VmaStringBuilder::AddNumber(uint32_t num)
8608 {
8609  char buf[11];
8610  buf[10] = '\0';
8611  char *p = &buf[10];
8612  do
8613  {
8614  *--p = '0' + (num % 10);
8615  num /= 10;
8616  }
8617  while(num);
8618  Add(p);
8619 }
8620 
8621 void VmaStringBuilder::AddNumber(uint64_t num)
8622 {
8623  char buf[21];
8624  buf[20] = '\0';
8625  char *p = &buf[20];
8626  do
8627  {
8628  *--p = '0' + (num % 10);
8629  num /= 10;
8630  }
8631  while(num);
8632  Add(p);
8633 }
8634 
8635 void VmaStringBuilder::AddPointer(const void* ptr)
8636 {
8637  char buf[21];
8638  VmaPtrToStr(buf, sizeof(buf), ptr);
8639  Add(buf);
8640 }
8641 
8642 #endif // #if VMA_STATS_STRING_ENABLED
8643 
8644 ////////////////////////////////////////////////////////////////////////////////
8645 // VmaJsonWriter
8646 
8647 #if VMA_STATS_STRING_ENABLED
8648 
8649 class VmaJsonWriter
8650 {
8651  VMA_CLASS_NO_COPY(VmaJsonWriter)
8652 public:
8653  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8654  ~VmaJsonWriter();
8655 
8656  void BeginObject(bool singleLine = false);
8657  void EndObject();
8658 
8659  void BeginArray(bool singleLine = false);
8660  void EndArray();
8661 
8662  void WriteString(const char* pStr);
8663  void BeginString(const char* pStr = VMA_NULL);
8664  void ContinueString(const char* pStr);
8665  void ContinueString(uint32_t n);
8666  void ContinueString(uint64_t n);
8667  void ContinueString_Pointer(const void* ptr);
8668  void EndString(const char* pStr = VMA_NULL);
8669 
8670  void WriteNumber(uint32_t n);
8671  void WriteNumber(uint64_t n);
8672  void WriteBool(bool b);
8673  void WriteNull();
8674 
8675 private:
8676  static const char* const INDENT;
8677 
8678  enum COLLECTION_TYPE
8679  {
8680  COLLECTION_TYPE_OBJECT,
8681  COLLECTION_TYPE_ARRAY,
8682  };
8683  struct StackItem
8684  {
8685  COLLECTION_TYPE type;
8686  uint32_t valueCount;
8687  bool singleLineMode;
8688  };
8689 
8690  VmaStringBuilder& m_SB;
8691  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8692  bool m_InsideString;
8693 
8694  void BeginValue(bool isString);
8695  void WriteIndent(bool oneLess = false);
8696 };
8697 
8698 const char* const VmaJsonWriter::INDENT = "  ";
8699 
8700 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8701  m_SB(sb),
8702  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8703  m_InsideString(false)
8704 {
8705 }
8706 
8707 VmaJsonWriter::~VmaJsonWriter()
8708 {
8709  VMA_ASSERT(!m_InsideString);
8710  VMA_ASSERT(m_Stack.empty());
8711 }
8712 
8713 void VmaJsonWriter::BeginObject(bool singleLine)
8714 {
8715  VMA_ASSERT(!m_InsideString);
8716 
8717  BeginValue(false);
8718  m_SB.Add('{');
8719 
8720  StackItem item;
8721  item.type = COLLECTION_TYPE_OBJECT;
8722  item.valueCount = 0;
8723  item.singleLineMode = singleLine;
8724  m_Stack.push_back(item);
8725 }
8726 
8727 void VmaJsonWriter::EndObject()
8728 {
8729  VMA_ASSERT(!m_InsideString);
8730 
8731  WriteIndent(true);
8732  m_SB.Add('}');
8733 
8734  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8735  m_Stack.pop_back();
8736 }
8737 
8738 void VmaJsonWriter::BeginArray(bool singleLine)
8739 {
8740  VMA_ASSERT(!m_InsideString);
8741 
8742  BeginValue(false);
8743  m_SB.Add('[');
8744 
8745  StackItem item;
8746  item.type = COLLECTION_TYPE_ARRAY;
8747  item.valueCount = 0;
8748  item.singleLineMode = singleLine;
8749  m_Stack.push_back(item);
8750 }
8751 
8752 void VmaJsonWriter::EndArray()
8753 {
8754  VMA_ASSERT(!m_InsideString);
8755 
8756  WriteIndent(true);
8757  m_SB.Add(']');
8758 
8759  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8760  m_Stack.pop_back();
8761 }
8762 
8763 void VmaJsonWriter::WriteString(const char* pStr)
8764 {
8765  BeginString(pStr);
8766  EndString();
8767 }
8768 
8769 void VmaJsonWriter::BeginString(const char* pStr)
8770 {
8771  VMA_ASSERT(!m_InsideString);
8772 
8773  BeginValue(true);
8774  m_SB.Add('"');
8775  m_InsideString = true;
8776  if(pStr != VMA_NULL && pStr[0] != '\0')
8777  {
8778  ContinueString(pStr);
8779  }
8780 }
8781 
8782 void VmaJsonWriter::ContinueString(const char* pStr)
8783 {
8784  VMA_ASSERT(m_InsideString);
8785 
8786  const size_t strLen = strlen(pStr);
8787  for(size_t i = 0; i < strLen; ++i)
8788  {
8789  char ch = pStr[i];
8790  if(ch == '\\')
8791  {
8792  m_SB.Add("\\\\");
8793  }
8794  else if(ch == '"')
8795  {
8796  m_SB.Add("\\\"");
8797  }
8798  else if(ch >= 32)
8799  {
8800  m_SB.Add(ch);
8801  }
8802  else switch(ch)
8803  {
8804  case '\b':
8805  m_SB.Add("\\b");
8806  break;
8807  case '\f':
8808  m_SB.Add("\\f");
8809  break;
8810  case '\n':
8811  m_SB.Add("\\n");
8812  break;
8813  case '\r':
8814  m_SB.Add("\\r");
8815  break;
8816  case '\t':
8817  m_SB.Add("\\t");
8818  break;
8819  default:
8820  VMA_ASSERT(0 && "Character not currently supported.");
8821  break;
8822  }
8823  }
8824 }
8825 
8826 void VmaJsonWriter::ContinueString(uint32_t n)
8827 {
8828  VMA_ASSERT(m_InsideString);
8829  m_SB.AddNumber(n);
8830 }
8831 
8832 void VmaJsonWriter::ContinueString(uint64_t n)
8833 {
8834  VMA_ASSERT(m_InsideString);
8835  m_SB.AddNumber(n);
8836 }
8837 
8838 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8839 {
8840  VMA_ASSERT(m_InsideString);
8841  m_SB.AddPointer(ptr);
8842 }
8843 
8844 void VmaJsonWriter::EndString(const char* pStr)
8845 {
8846  VMA_ASSERT(m_InsideString);
8847  if(pStr != VMA_NULL && pStr[0] != '\0')
8848  {
8849  ContinueString(pStr);
8850  }
8851  m_SB.Add('"');
8852  m_InsideString = false;
8853 }
8854 
8855 void VmaJsonWriter::WriteNumber(uint32_t n)
8856 {
8857  VMA_ASSERT(!m_InsideString);
8858  BeginValue(false);
8859  m_SB.AddNumber(n);
8860 }
8861 
8862 void VmaJsonWriter::WriteNumber(uint64_t n)
8863 {
8864  VMA_ASSERT(!m_InsideString);
8865  BeginValue(false);
8866  m_SB.AddNumber(n);
8867 }
8868 
8869 void VmaJsonWriter::WriteBool(bool b)
8870 {
8871  VMA_ASSERT(!m_InsideString);
8872  BeginValue(false);
8873  m_SB.Add(b ? "true" : "false");
8874 }
8875 
8876 void VmaJsonWriter::WriteNull()
8877 {
8878  VMA_ASSERT(!m_InsideString);
8879  BeginValue(false);
8880  m_SB.Add("null");
8881 }
8882 
8883 void VmaJsonWriter::BeginValue(bool isString)
8884 {
8885  if(!m_Stack.empty())
8886  {
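 // Key/value alternation inside an object: an even valueCount means the next
 // item is a key (must be a string, written on a fresh indented line); an odd
 // valueCount means it is the value following a key, so it is prefixed with
 // ": " instead of a comma and line break.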
8887  StackItem& currItem = m_Stack.back();
8888  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8889  currItem.valueCount % 2 == 0)
8890  {
8891  VMA_ASSERT(isString);
8892  }
8893 
8894  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8895  currItem.valueCount % 2 != 0)
8896  {
8897  m_SB.Add(": ");
8898  }
8899  else if(currItem.valueCount > 0)
8900  {
8901  m_SB.Add(", ");
8902  WriteIndent();
8903  }
8904  else
8905  {
8906  WriteIndent();
8907  }
8908  ++currItem.valueCount;
8909  }
8910 }
8911 
8912 void VmaJsonWriter::WriteIndent(bool oneLess)
8913 {
8914  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8915  {
8916  m_SB.AddNewLine();
8917 
8918  size_t count = m_Stack.size();
8919  if(count > 0 && oneLess)
8920  {
8921  --count;
8922  }
8923  for(size_t i = 0; i < count; ++i)
8924  {
8925  m_SB.Add(INDENT);
8926  }
8927  }
8928 }
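// Illustrative use of VmaJsonWriter (a hedged sketch, not part of the library;
// assumes a valid VmaAllocator `allocator`). Keys and values alternate inside
// an object, so every value must be preceded by a string key:
//
//   VmaStringBuilder sb(allocator);
//   {
//       VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
//       json.BeginObject();
//       json.WriteString("Count");   // key
//       json.WriteNumber(42u);       // value
//       json.EndObject();
//   } // ~VmaJsonWriter() asserts the object/array stack is balanced.
//   // sb.GetData()/sb.GetLength() now describe: {\n  "Count": 42\n}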
8929 
8930 #endif // #if VMA_STATS_STRING_ENABLED
8931 
8932 ////////////////////////////////////////////////////////////////////////////////
8933 
8934 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8935 {
8936  if(IsUserDataString())
8937  {
8938  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8939 
8940  FreeUserDataString(hAllocator);
8941 
8942  if(pUserData != VMA_NULL)
8943  {
8944  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8945  }
8946  }
8947  else
8948  {
8949  m_pUserData = pUserData;
8950  }
8951 }
8952 
8953 void VmaAllocation_T::ChangeBlockAllocation(
8954  VmaAllocator hAllocator,
8955  VmaDeviceMemoryBlock* block,
8956  VkDeviceSize offset)
8957 {
8958  VMA_ASSERT(block != VMA_NULL);
8959  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8960 
8961  // Move mapping reference counter from old block to new block.
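 // If the allocation is persistently mapped, that hidden map reference is
 // included as well, so the new block ends up mapped exactly as many times
 // on behalf of this allocation as the old block was.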
8962  if(block != m_BlockAllocation.m_Block)
8963  {
8964  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8965  if(IsPersistentMap())
8966  ++mapRefCount;
8967  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8968  block->Map(hAllocator, mapRefCount, VMA_NULL);
8969  }
8970 
8971  m_BlockAllocation.m_Block = block;
8972  m_BlockAllocation.m_Offset = offset;
8973 }
8974 
8975 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8976 {
8977  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8978  m_BlockAllocation.m_Offset = newOffset;
8979 }
8980 
8981 VkDeviceSize VmaAllocation_T::GetOffset() const
8982 {
8983  switch(m_Type)
8984  {
8985  case ALLOCATION_TYPE_BLOCK:
8986  return m_BlockAllocation.m_Offset;
8987  case ALLOCATION_TYPE_DEDICATED:
8988  return 0;
8989  default:
8990  VMA_ASSERT(0);
8991  return 0;
8992  }
8993 }
8994 
8995 VkDeviceMemory VmaAllocation_T::GetMemory() const
8996 {
8997  switch(m_Type)
8998  {
8999  case ALLOCATION_TYPE_BLOCK:
9000  return m_BlockAllocation.m_Block->GetDeviceMemory();
9001  case ALLOCATION_TYPE_DEDICATED:
9002  return m_DedicatedAllocation.m_hMemory;
9003  default:
9004  VMA_ASSERT(0);
9005  return VK_NULL_HANDLE;
9006  }
9007 }
9008 
9009 void* VmaAllocation_T::GetMappedData() const
9010 {
9011  switch(m_Type)
9012  {
9013  case ALLOCATION_TYPE_BLOCK:
9014  if(m_MapCount != 0)
9015  {
9016  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
9017  VMA_ASSERT(pBlockData != VMA_NULL);
9018  return (char*)pBlockData + m_BlockAllocation.m_Offset;
9019  }
9020  else
9021  {
9022  return VMA_NULL;
9023  }
9024  break;
9025  case ALLOCATION_TYPE_DEDICATED:
9026  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
9027  return m_DedicatedAllocation.m_pMappedData;
9028  default:
9029  VMA_ASSERT(0);
9030  return VMA_NULL;
9031  }
9032 }
9033 
9034 bool VmaAllocation_T::CanBecomeLost() const
9035 {
9036  switch(m_Type)
9037  {
9038  case ALLOCATION_TYPE_BLOCK:
9039  return m_BlockAllocation.m_CanBecomeLost;
9040  case ALLOCATION_TYPE_DEDICATED:
9041  return false;
9042  default:
9043  VMA_ASSERT(0);
9044  return false;
9045  }
9046 }
9047 
9048 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9049 {
9050  VMA_ASSERT(CanBecomeLost());
9051 
9052  /*
9053  Warning: This is a carefully designed algorithm.
9054  Do not modify unless you really know what you're doing :)
9055  */
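 // The allocation can become lost only when its last use is older than
 // currentFrameIndex - frameInUseCount. On contention the compare-exchange
 // fails, the loop retries with the freshly observed value, and at most one
 // thread performs the transition to VMA_FRAME_INDEX_LOST.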
9056  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
9057  for(;;)
9058  {
9059  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
9060  {
9061  VMA_ASSERT(0);
9062  return false;
9063  }
9064  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
9065  {
9066  return false;
9067  }
9068  else // Last use time earlier than current time.
9069  {
9070  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
9071  {
9072  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
9073  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
9074  return true;
9075  }
9076  }
9077  }
9078 }
9079 
9080 #if VMA_STATS_STRING_ENABLED
9081 
9082 // Names correspond to values of enum VmaSuballocationType.
9083 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
9084  "FREE",
9085  "UNKNOWN",
9086  "BUFFER",
9087  "IMAGE_UNKNOWN",
9088  "IMAGE_LINEAR",
9089  "IMAGE_OPTIMAL",
9090 };
9091 
9092 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
9093 {
9094  json.WriteString("Type");
9095  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
9096 
9097  json.WriteString("Size");
9098  json.WriteNumber(m_Size);
9099 
9100  if(m_pUserData != VMA_NULL)
9101  {
9102  json.WriteString("UserData");
9103  if(IsUserDataString())
9104  {
9105  json.WriteString((const char*)m_pUserData);
9106  }
9107  else
9108  {
9109  json.BeginString();
9110  json.ContinueString_Pointer(m_pUserData);
9111  json.EndString();
9112  }
9113  }
9114 
9115  json.WriteString("CreationFrameIndex");
9116  json.WriteNumber(m_CreationFrameIndex);
9117 
9118  json.WriteString("LastUseFrameIndex");
9119  json.WriteNumber(GetLastUseFrameIndex());
9120 
9121  if(m_BufferImageUsage != 0)
9122  {
9123  json.WriteString("Usage");
9124  json.WriteNumber(m_BufferImageUsage);
9125  }
9126 }
9127 
9128 #endif
9129 
9130 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
9131 {
9132  VMA_ASSERT(IsUserDataString());
9133  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
9134  m_pUserData = VMA_NULL;
9135 }
9136 
9137 void VmaAllocation_T::BlockAllocMap()
9138 {
9139  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
9140 
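 // The low 7 bits of m_MapCount hold the mapping reference count; the top
 // bit (MAP_COUNT_FLAG_PERSISTENT_MAP) marks an allocation that was created
 // persistently mapped, hence the cap of 0x7F simultaneous mappings.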
9141  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
9142  {
9143  ++m_MapCount;
9144  }
9145  else
9146  {
9147  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
9148  }
9149 }
9150 
9151 void VmaAllocation_T::BlockAllocUnmap()
9152 {
9153  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
9154 
9155  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
9156  {
9157  --m_MapCount;
9158  }
9159  else
9160  {
9161  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
9162  }
9163 }
9164 
9165 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
9166 {
9167  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
9168 
9169  if(m_MapCount != 0)
9170  {
9171  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
9172  {
9173  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
9174  *ppData = m_DedicatedAllocation.m_pMappedData;
9175  ++m_MapCount;
9176  return VK_SUCCESS;
9177  }
9178  else
9179  {
9180  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
9181  return VK_ERROR_MEMORY_MAP_FAILED;
9182  }
9183  }
9184  else
9185  {
9186  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9187  hAllocator->m_hDevice,
9188  m_DedicatedAllocation.m_hMemory,
9189  0, // offset
9190  VK_WHOLE_SIZE,
9191  0, // flags
9192  ppData);
9193  if(result == VK_SUCCESS)
9194  {
9195  m_DedicatedAllocation.m_pMappedData = *ppData;
9196  m_MapCount = 1;
9197  }
9198  return result;
9199  }
9200 }
9201 
9202 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
9203 {
9204  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
9205 
9206  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
9207  {
9208  --m_MapCount;
9209  if(m_MapCount == 0)
9210  {
9211  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
9212  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
9213  hAllocator->m_hDevice,
9214  m_DedicatedAllocation.m_hMemory);
9215  }
9216  }
9217  else
9218  {
9219  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
9220  }
9221 }
9222 
9223 #if VMA_STATS_STRING_ENABLED
9224 
9225 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
9226 {
9227  json.BeginObject();
9228 
9229  json.WriteString("Blocks");
9230  json.WriteNumber(stat.blockCount);
9231 
9232  json.WriteString("Allocations");
9233  json.WriteNumber(stat.allocationCount);
9234 
9235  json.WriteString("UnusedRanges");
9236  json.WriteNumber(stat.unusedRangeCount);
9237 
9238  json.WriteString("UsedBytes");
9239  json.WriteNumber(stat.usedBytes);
9240 
9241  json.WriteString("UnusedBytes");
9242  json.WriteNumber(stat.unusedBytes);
9243 
9244  if(stat.allocationCount > 1)
9245  {
9246  json.WriteString("AllocationSize");
9247  json.BeginObject(true);
9248  json.WriteString("Min");
9249  json.WriteNumber(stat.allocationSizeMin);
9250  json.WriteString("Avg");
9251  json.WriteNumber(stat.allocationSizeAvg);
9252  json.WriteString("Max");
9253  json.WriteNumber(stat.allocationSizeMax);
9254  json.EndObject();
9255  }
9256 
9257  if(stat.unusedRangeCount > 1)
9258  {
9259  json.WriteString("UnusedRangeSize");
9260  json.BeginObject(true);
9261  json.WriteString("Min");
9262  json.WriteNumber(stat.unusedRangeSizeMin);
9263  json.WriteString("Avg");
9264  json.WriteNumber(stat.unusedRangeSizeAvg);
9265  json.WriteString("Max");
9266  json.WriteNumber(stat.unusedRangeSizeMax);
9267  json.EndObject();
9268  }
9269 
9270  json.EndObject();
9271 }
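// Shape of the JSON emitted above, with illustrative values and the layout
// condensed here (the writer actually puts each pair on its own line):
//
//   { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1, "UsedBytes": 1048576,
//     "UnusedBytes": 65536,
//     "AllocationSize": { "Min": 4096, "Avg": 524288, "Max": 1044480 } }
//
// "AllocationSize" and "UnusedRangeSize" appear only when more than one
// allocation or unused range exists, respectively.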
9272 
9273 #endif // #if VMA_STATS_STRING_ENABLED
9274 
9275 struct VmaSuballocationItemSizeLess
9276 {
9277  bool operator()(
9278  const VmaSuballocationList::iterator lhs,
9279  const VmaSuballocationList::iterator rhs) const
9280  {
9281  return lhs->size < rhs->size;
9282  }
9283  bool operator()(
9284  const VmaSuballocationList::iterator lhs,
9285  VkDeviceSize rhsSize) const
9286  {
9287  return lhs->size < rhsSize;
9288  }
9289 };
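// The two operator() overloads above let VmaBinaryFindFirstNotLess search the
// size-sorted m_FreeSuballocationsBySize vector with either another iterator
// or a plain VkDeviceSize as the key.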
9290 
9291 
9292 ////////////////////////////////////////////////////////////////////////////////
9293 // class VmaBlockMetadata
9294 
9295 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
9296  m_Size(0),
9297  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
9298 {
9299 }
9300 
9301 #if VMA_STATS_STRING_ENABLED
9302 
9303 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
9304  VkDeviceSize unusedBytes,
9305  size_t allocationCount,
9306  size_t unusedRangeCount) const
9307 {
9308  json.BeginObject();
9309 
9310  json.WriteString("TotalBytes");
9311  json.WriteNumber(GetSize());
9312 
9313  json.WriteString("UnusedBytes");
9314  json.WriteNumber(unusedBytes);
9315 
9316  json.WriteString("Allocations");
9317  json.WriteNumber((uint64_t)allocationCount);
9318 
9319  json.WriteString("UnusedRanges");
9320  json.WriteNumber((uint64_t)unusedRangeCount);
9321 
9322  json.WriteString("Suballocations");
9323  json.BeginArray();
9324 }
9325 
9326 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
9327  VkDeviceSize offset,
9328  VmaAllocation hAllocation) const
9329 {
9330  json.BeginObject(true);
9331 
9332  json.WriteString("Offset");
9333  json.WriteNumber(offset);
9334 
9335  hAllocation->PrintParameters(json);
9336 
9337  json.EndObject();
9338 }
9339 
9340 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
9341  VkDeviceSize offset,
9342  VkDeviceSize size) const
9343 {
9344  json.BeginObject(true);
9345 
9346  json.WriteString("Offset");
9347  json.WriteNumber(offset);
9348 
9349  json.WriteString("Type");
9350  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
9351 
9352  json.WriteString("Size");
9353  json.WriteNumber(size);
9354 
9355  json.EndObject();
9356 }
9357 
9358 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
9359 {
9360  json.EndArray();
9361  json.EndObject();
9362 }
9363 
9364 #endif // #if VMA_STATS_STRING_ENABLED
9365 
9366 ////////////////////////////////////////////////////////////////////////////////
9367 // class VmaBlockMetadata_Generic
9368 
9369 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
9370  VmaBlockMetadata(hAllocator),
9371  m_FreeCount(0),
9372  m_SumFreeSize(0),
9373  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9374  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
9375 {
9376 }
9377 
9378 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
9379 {
9380 }
9381 
9382 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
9383 {
9384  VmaBlockMetadata::Init(size);
9385 
9386  m_FreeCount = 1;
9387  m_SumFreeSize = size;
9388 
9389  VmaSuballocation suballoc = {};
9390  suballoc.offset = 0;
9391  suballoc.size = size;
9392  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9393  suballoc.hAllocation = VK_NULL_HANDLE;
9394 
9395  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9396  m_Suballocations.push_back(suballoc);
9397  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
9398  --suballocItem;
9399  m_FreeSuballocationsBySize.push_back(suballocItem);
9400 }
9401 
9402 bool VmaBlockMetadata_Generic::Validate() const
9403 {
9404  VMA_VALIDATE(!m_Suballocations.empty());
9405 
9406  // Expected offset of new suballocation as calculated from previous ones.
9407  VkDeviceSize calculatedOffset = 0;
9408  // Expected number of free suballocations as calculated from traversing their list.
9409  uint32_t calculatedFreeCount = 0;
9410  // Expected sum size of free suballocations as calculated from traversing their list.
9411  VkDeviceSize calculatedSumFreeSize = 0;
9412  // Expected number of free suballocations that should be registered in
9413  // m_FreeSuballocationsBySize calculated from traversing their list.
9414  size_t freeSuballocationsToRegister = 0;
9415  // True if the previously visited suballocation was free.
9416  bool prevFree = false;
9417 
9418  for(const auto& subAlloc : m_Suballocations)
9419  {
9420  // Actual offset of this suballocation doesn't match expected one.
9421  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
9422 
9423  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
9424  // Two adjacent free suballocations are invalid. They should be merged.
9425  VMA_VALIDATE(!prevFree || !currFree);
9426 
9427  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
9428 
9429  if(currFree)
9430  {
9431  calculatedSumFreeSize += subAlloc.size;
9432  ++calculatedFreeCount;
9433  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9434  {
9435  ++freeSuballocationsToRegister;
9436  }
9437 
9438  // Margin required between allocations - every free range must be at least that large.
9439  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
9440  }
9441  else
9442  {
9443  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
9444  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
9445 
9446  // Margin required between allocations - previous allocation must be free.
9447  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
9448  }
9449 
9450  calculatedOffset += subAlloc.size;
9451  prevFree = currFree;
9452  }
9453 
9454  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
9455  // match expected one.
9456  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
9457 
9458  VkDeviceSize lastSize = 0;
9459  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
9460  {
9461  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
9462 
9463  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
9464  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9465  // They must be sorted by size ascending.
9466  VMA_VALIDATE(suballocItem->size >= lastSize);
9467 
9468  lastSize = suballocItem->size;
9469  }
9470 
9471  // Check if totals match calculated values.
9472  VMA_VALIDATE(ValidateFreeSuballocationList());
9473  VMA_VALIDATE(calculatedOffset == GetSize());
9474  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
9475  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
9476 
9477  return true;
9478 }
9479 
9480 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
9481 {
9482  if(!m_FreeSuballocationsBySize.empty())
9483  {
9484  return m_FreeSuballocationsBySize.back()->size;
9485  }
9486  else
9487  {
9488  return 0;
9489  }
9490 }
9491 
9492 bool VmaBlockMetadata_Generic::IsEmpty() const
9493 {
9494  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
9495 }
9496 
9497 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9498 {
9499  outInfo.blockCount = 1;
9500 
9501  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9502  outInfo.allocationCount = rangeCount - m_FreeCount;
9503  outInfo.unusedRangeCount = m_FreeCount;
9504 
9505  outInfo.unusedBytes = m_SumFreeSize;
9506  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
9507 
9508  outInfo.allocationSizeMin = UINT64_MAX;
9509  outInfo.allocationSizeMax = 0;
9510  outInfo.unusedRangeSizeMin = UINT64_MAX;
9511  outInfo.unusedRangeSizeMax = 0;
9512 
9513  for(const auto& suballoc : m_Suballocations)
9514  {
9515  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9516  {
9517  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9518  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9519  }
9520  else
9521  {
9522  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
9523  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
9524  }
9525  }
9526 }
9527 
9528 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
9529 {
9530  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9531 
9532  inoutStats.size += GetSize();
9533  inoutStats.unusedSize += m_SumFreeSize;
9534  inoutStats.allocationCount += rangeCount - m_FreeCount;
9535  inoutStats.unusedRangeCount += m_FreeCount;
9536  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9537 }
9538 
9539 #if VMA_STATS_STRING_ENABLED
9540 
9541 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
9542 {
9543  PrintDetailedMap_Begin(json,
9544  m_SumFreeSize, // unusedBytes
9545  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
9546  m_FreeCount); // unusedRangeCount
9547 
9548  size_t i = 0;
9549  for(const auto& suballoc : m_Suballocations)
9550  {
9551  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9552  {
9553  PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
9554  }
9555  else
9556  {
9557  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9558  }
9559  }
9560 
9561  PrintDetailedMap_End(json);
9562 }
9563 
9564 #endif // #if VMA_STATS_STRING_ENABLED
9565 
9566 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9567  uint32_t currentFrameIndex,
9568  uint32_t frameInUseCount,
9569  VkDeviceSize bufferImageGranularity,
9570  VkDeviceSize allocSize,
9571  VkDeviceSize allocAlignment,
9572  bool upperAddress,
9573  VmaSuballocationType allocType,
9574  bool canMakeOtherLost,
9575  uint32_t strategy,
9576  VmaAllocationRequest* pAllocationRequest)
9577 {
9578  VMA_ASSERT(allocSize > 0);
9579  VMA_ASSERT(!upperAddress);
9580  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9581  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9582  VMA_HEAVY_ASSERT(Validate());
9583 
9584  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9585 
9586  // There is not enough total free space in this block to fulfill the request: Early return.
9587  if(canMakeOtherLost == false &&
9588  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9589  {
9590  return false;
9591  }
9592 
9593  // New algorithm, efficiently searching freeSuballocationsBySize.
9594  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9595  if(freeSuballocCount > 0)
9596  {
9597  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
9598  {
9599  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9600  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9601  m_FreeSuballocationsBySize.data(),
9602  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9603  allocSize + 2 * VMA_DEBUG_MARGIN,
9604  VmaSuballocationItemSizeLess());
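 // `it` now points at the smallest free suballocation that is large enough;
 // the loop below advances past candidates that still fail the alignment or
 // buffer-image-granularity checks inside CheckAllocation().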
9605  size_t index = it - m_FreeSuballocationsBySize.data();
9606  for(; index < freeSuballocCount; ++index)
9607  {
9608  if(CheckAllocation(
9609  currentFrameIndex,
9610  frameInUseCount,
9611  bufferImageGranularity,
9612  allocSize,
9613  allocAlignment,
9614  allocType,
9615  m_FreeSuballocationsBySize[index],
9616  false, // canMakeOtherLost
9617  &pAllocationRequest->offset,
9618  &pAllocationRequest->itemsToMakeLostCount,
9619  &pAllocationRequest->sumFreeSize,
9620  &pAllocationRequest->sumItemSize))
9621  {
9622  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9623  return true;
9624  }
9625  }
9626  }
9627  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9628  {
9629  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9630  it != m_Suballocations.end();
9631  ++it)
9632  {
9633  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9634  currentFrameIndex,
9635  frameInUseCount,
9636  bufferImageGranularity,
9637  allocSize,
9638  allocAlignment,
9639  allocType,
9640  it,
9641  false, // canMakeOtherLost
9642  &pAllocationRequest->offset,
9643  &pAllocationRequest->itemsToMakeLostCount,
9644  &pAllocationRequest->sumFreeSize,
9645  &pAllocationRequest->sumItemSize))
9646  {
9647  pAllocationRequest->item = it;
9648  return true;
9649  }
9650  }
9651  }
9652  else // WORST_FIT, FIRST_FIT
9653  {
9654  // Search starting from the biggest suballocations.
9655  for(size_t index = freeSuballocCount; index--; )
9656  {
9657  if(CheckAllocation(
9658  currentFrameIndex,
9659  frameInUseCount,
9660  bufferImageGranularity,
9661  allocSize,
9662  allocAlignment,
9663  allocType,
9664  m_FreeSuballocationsBySize[index],
9665  false, // canMakeOtherLost
9666  &pAllocationRequest->offset,
9667  &pAllocationRequest->itemsToMakeLostCount,
9668  &pAllocationRequest->sumFreeSize,
9669  &pAllocationRequest->sumItemSize))
9670  {
9671  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9672  return true;
9673  }
9674  }
9675  }
9676  }
9677 
9678  if(canMakeOtherLost)
9679  {
9680  // Brute-force algorithm. TODO: Come up with something better.
9681 
9682  bool found = false;
9683  VmaAllocationRequest tmpAllocRequest = {};
9684  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9685  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9686  suballocIt != m_Suballocations.end();
9687  ++suballocIt)
9688  {
9689  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9690  suballocIt->hAllocation->CanBecomeLost())
9691  {
9692  if(CheckAllocation(
9693  currentFrameIndex,
9694  frameInUseCount,
9695  bufferImageGranularity,
9696  allocSize,
9697  allocAlignment,
9698  allocType,
9699  suballocIt,
9700  canMakeOtherLost,
9701  &tmpAllocRequest.offset,
9702  &tmpAllocRequest.itemsToMakeLostCount,
9703  &tmpAllocRequest.sumFreeSize,
9704  &tmpAllocRequest.sumItemSize))
9705  {
9706  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
9707  {
9708  *pAllocationRequest = tmpAllocRequest;
9709  pAllocationRequest->item = suballocIt;
9710  break;
9711  }
9712  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9713  {
9714  *pAllocationRequest = tmpAllocRequest;
9715  pAllocationRequest->item = suballocIt;
9716  found = true;
9717  }
9718  }
9719  }
9720  }
9721 
9722  return found;
9723  }
9724 
9725  return false;
9726 }
9727 
9728 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9729  uint32_t currentFrameIndex,
9730  uint32_t frameInUseCount,
9731  VmaAllocationRequest* pAllocationRequest)
9732 {
9733  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9734 
9735  while(pAllocationRequest->itemsToMakeLostCount > 0)
9736  {
9737  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9738  {
9739  ++pAllocationRequest->item;
9740  }
9741  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9742  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9743  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9744  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9745  {
9746  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9747  --pAllocationRequest->itemsToMakeLostCount;
9748  }
9749  else
9750  {
9751  return false;
9752  }
9753  }
9754 
9755  VMA_HEAVY_ASSERT(Validate());
9756  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9757  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9758 
9759  return true;
9760 }
9761 
9762 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9763 {
9764  uint32_t lostAllocationCount = 0;
9765  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9766  it != m_Suballocations.end();
9767  ++it)
9768  {
9769  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9770  it->hAllocation->CanBecomeLost() &&
9771  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9772  {
9773  it = FreeSuballocation(it);
9774  ++lostAllocationCount;
9775  }
9776  }
9777  return lostAllocationCount;
9778 }
9779 
9780 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9781 {
9782  for(auto& suballoc : m_Suballocations)
9783  {
9784  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9785  {
9786  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9787  {
9788  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9789  return VK_ERROR_VALIDATION_FAILED_EXT;
9790  }
9791  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9792  {
9793  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9794  return VK_ERROR_VALIDATION_FAILED_EXT;
9795  }
9796  }
9797  }
9798 
9799  return VK_SUCCESS;
9800 }
9801 
9802 void VmaBlockMetadata_Generic::Alloc(
9803  const VmaAllocationRequest& request,
9804  VmaSuballocationType type,
9805  VkDeviceSize allocSize,
9806  VmaAllocation hAllocation)
9807 {
9808  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9809  VMA_ASSERT(request.item != m_Suballocations.end());
9810  VmaSuballocation& suballoc = *request.item;
9811  // Given suballocation is a free block.
9812  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9813  // Given offset is inside this suballocation.
9814  VMA_ASSERT(request.offset >= suballoc.offset);
9815  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9816  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9817  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9818 
9819  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9820  // it to become used.
9821  UnregisterFreeSuballocation(request.item);
9822 
9823  suballoc.offset = request.offset;
9824  suballoc.size = allocSize;
9825  suballoc.type = type;
9826  suballoc.hAllocation = hAllocation;
9827 
9828  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9829  if(paddingEnd)
9830  {
9831  VmaSuballocation paddingSuballoc = {};
9832  paddingSuballoc.offset = request.offset + allocSize;
9833  paddingSuballoc.size = paddingEnd;
9834  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9835  VmaSuballocationList::iterator next = request.item;
9836  ++next;
9837  const VmaSuballocationList::iterator paddingEndItem =
9838  m_Suballocations.insert(next, paddingSuballoc);
9839  RegisterFreeSuballocation(paddingEndItem);
9840  }
9841 
9842  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9843  if(paddingBegin)
9844  {
9845  VmaSuballocation paddingSuballoc = {};
9846  paddingSuballoc.offset = request.offset - paddingBegin;
9847  paddingSuballoc.size = paddingBegin;
9848  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9849  const VmaSuballocationList::iterator paddingBeginItem =
9850  m_Suballocations.insert(request.item, paddingSuballoc);
9851  RegisterFreeSuballocation(paddingBeginItem);
9852  }
9853 
9854  // Update totals.
9855  m_FreeCount = m_FreeCount - 1;
9856  if(paddingBegin > 0)
9857  {
9858  ++m_FreeCount;
9859  }
9860  if(paddingEnd > 0)
9861  {
9862  ++m_FreeCount;
9863  }
9864  m_SumFreeSize -= allocSize;
9865 }
9866 
9867 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9868 {
9869  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9870  suballocItem != m_Suballocations.end();
9871  ++suballocItem)
9872  {
9873  VmaSuballocation& suballoc = *suballocItem;
9874  if(suballoc.hAllocation == allocation)
9875  {
9876  FreeSuballocation(suballocItem);
9877  VMA_HEAVY_ASSERT(Validate());
9878  return;
9879  }
9880  }
9881  VMA_ASSERT(0 && "Not found!");
9882 }
9883 
9884 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9885 {
9886  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9887  suballocItem != m_Suballocations.end();
9888  ++suballocItem)
9889  {
9890  VmaSuballocation& suballoc = *suballocItem;
9891  if(suballoc.offset == offset)
9892  {
9893  FreeSuballocation(suballocItem);
9894  return;
9895  }
9896  }
9897  VMA_ASSERT(0 && "Not found!");
9898 }
9899 
9900 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9901 {
9902  VkDeviceSize lastSize = 0;
9903  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9904  {
9905  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9906 
9907  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9908  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9909  VMA_VALIDATE(it->size >= lastSize);
9910  lastSize = it->size;
9911  }
9912  return true;
9913 }
9914 
9915 bool VmaBlockMetadata_Generic::CheckAllocation(
9916  uint32_t currentFrameIndex,
9917  uint32_t frameInUseCount,
9918  VkDeviceSize bufferImageGranularity,
9919  VkDeviceSize allocSize,
9920  VkDeviceSize allocAlignment,
9921  VmaSuballocationType allocType,
9922  VmaSuballocationList::const_iterator suballocItem,
9923  bool canMakeOtherLost,
9924  VkDeviceSize* pOffset,
9925  size_t* itemsToMakeLostCount,
9926  VkDeviceSize* pSumFreeSize,
9927  VkDeviceSize* pSumItemSize) const
9928 {
9929  VMA_ASSERT(allocSize > 0);
9930  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9931  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9932  VMA_ASSERT(pOffset != VMA_NULL);
9933 
9934  *itemsToMakeLostCount = 0;
9935  *pSumFreeSize = 0;
9936  *pSumItemSize = 0;
9937 
9938  if(canMakeOtherLost)
9939  {
9940  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9941  {
9942  *pSumFreeSize = suballocItem->size;
9943  }
9944  else
9945  {
9946  if(suballocItem->hAllocation->CanBecomeLost() &&
9947  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9948  {
9949  ++*itemsToMakeLostCount;
9950  *pSumItemSize = suballocItem->size;
9951  }
9952  else
9953  {
9954  return false;
9955  }
9956  }
9957 
9958  // Remaining size is too small for this request: Early return.
9959  if(GetSize() - suballocItem->offset < allocSize)
9960  {
9961  return false;
9962  }
9963 
9964  // Start from offset equal to beginning of this suballocation.
9965  *pOffset = suballocItem->offset;
9966 
9967  // Apply VMA_DEBUG_MARGIN at the beginning.
9968  if(VMA_DEBUG_MARGIN > 0)
9969  {
9970  *pOffset += VMA_DEBUG_MARGIN;
9971  }
9972 
9973  // Apply alignment.
9974  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9975 
9976  // Check previous suballocations for BufferImageGranularity conflicts.
9977  // Make bigger alignment if necessary.
9978  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
9979  {
9980  bool bufferImageGranularityConflict = false;
9981  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9982  while(prevSuballocItem != m_Suballocations.cbegin())
9983  {
9984  --prevSuballocItem;
9985  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9986  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9987  {
9988  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9989  {
9990  bufferImageGranularityConflict = true;
9991  break;
9992  }
9993  }
9994  else
9995  // Already on previous page.
9996  break;
9997  }
9998  if(bufferImageGranularityConflict)
9999  {
10000  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
10001  }
10002  }
10003 
10004  // Now that we have final *pOffset, check if we are past suballocItem.
10005  // If yes, return false - this function should be called for another suballocItem as starting point.
10006  if(*pOffset >= suballocItem->offset + suballocItem->size)
10007  {
10008  return false;
10009  }
10010 
10011  // Calculate padding at the beginning based on current offset.
10012  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
10013 
10014  // Calculate required margin at the end.
10015  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
10016 
10017  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
10018  // Another early return check.
10019  if(suballocItem->offset + totalSize > GetSize())
10020  {
10021  return false;
10022  }
10023 
10024  // Advance lastSuballocItem until desired size is reached.
10025  // Update itemsToMakeLostCount.
10026  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
10027  if(totalSize > suballocItem->size)
10028  {
10029  VkDeviceSize remainingSize = totalSize - suballocItem->size;
10030  while(remainingSize > 0)
10031  {
10032  ++lastSuballocItem;
10033  if(lastSuballocItem == m_Suballocations.cend())
10034  {
10035  return false;
10036  }
10037  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
10038  {
10039  *pSumFreeSize += lastSuballocItem->size;
10040  }
10041  else
10042  {
10043  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
10044  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
10045  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10046  {
10047  ++*itemsToMakeLostCount;
10048  *pSumItemSize += lastSuballocItem->size;
10049  }
10050  else
10051  {
10052  return false;
10053  }
10054  }
10055  remainingSize = (lastSuballocItem->size < remainingSize) ?
10056  remainingSize - lastSuballocItem->size : 0;
10057  }
10058  }
10059 
10060  // Check next suballocations for BufferImageGranularity conflicts.
10061  // If conflict exists, we must mark more allocations lost or fail.
10062  if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
10063  {
10064  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
10065  ++nextSuballocItem;
10066  while(nextSuballocItem != m_Suballocations.cend())
10067  {
10068  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
10069  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10070  {
10071  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10072  {
10073  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
10074  if(nextSuballoc.hAllocation->CanBecomeLost() &&
10075  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10076  {
10077  ++*itemsToMakeLostCount;
10078  }
10079  else
10080  {
10081  return false;
10082  }
10083  }
10084  }
10085  else
10086  {
10087  // Already on next page.
10088  break;
10089  }
10090  ++nextSuballocItem;
10091  }
10092  }
10093  }
10094  else
10095  {
10096  const VmaSuballocation& suballoc = *suballocItem;
10097  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10098 
10099  *pSumFreeSize = suballoc.size;
10100 
10101  // Size of this suballocation is too small for this request: Early return.
10102  if(suballoc.size < allocSize)
10103  {
10104  return false;
10105  }
10106 
10107  // Start from offset equal to beginning of this suballocation.
10108  *pOffset = suballoc.offset;
10109 
10110  // Apply VMA_DEBUG_MARGIN at the beginning.
10111  if(VMA_DEBUG_MARGIN > 0)
10112  {
10113  *pOffset += VMA_DEBUG_MARGIN;
10114  }
10115 
10116  // Apply alignment.
10117  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
10118 
10119  // Check previous suballocations for BufferImageGranularity conflicts.
10120  // Make bigger alignment if necessary.
10121  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
10122  {
10123  bool bufferImageGranularityConflict = false;
10124  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
10125  while(prevSuballocItem != m_Suballocations.cbegin())
10126  {
10127  --prevSuballocItem;
10128  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
10129  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
10130  {
10131  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10132  {
10133  bufferImageGranularityConflict = true;
10134  break;
10135  }
10136  }
10137  else
10138  // Already on previous page.
10139  break;
10140  }
10141  if(bufferImageGranularityConflict)
10142  {
10143  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
10144  }
10145  }
10146 
10147  // Calculate padding at the beginning based on current offset.
10148  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
10149 
10150  // Calculate required margin at the end.
10151  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
10152 
10153  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
10154  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
10155  {
10156  return false;
10157  }
10158 
10159  // Check next suballocations for BufferImageGranularity conflicts.
10160  // If conflict exists, allocation cannot be made here.
10161  if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
10162  {
10163  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
10164  ++nextSuballocItem;
10165  while(nextSuballocItem != m_Suballocations.cend())
10166  {
10167  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
10168  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10169  {
10170  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10171  {
10172  return false;
10173  }
10174  }
10175  else
10176  {
10177  // Already on next page.
10178  break;
10179  }
10180  ++nextSuballocItem;
10181  }
10182  }
10183  }
10184 
10185  // All tests passed: Success. pOffset is already filled.
10186  return true;
10187 }
10188 
10189 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
10190 {
10191  VMA_ASSERT(item != m_Suballocations.end());
10192  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10193 
10194  VmaSuballocationList::iterator nextItem = item;
10195  ++nextItem;
10196  VMA_ASSERT(nextItem != m_Suballocations.end());
10197  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
10198 
10199  item->size += nextItem->size;
10200  --m_FreeCount;
10201  m_Suballocations.erase(nextItem);
10202 }
10203 
10204 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
10205 {
10206  // Change this suballocation to be marked as free.
10207  VmaSuballocation& suballoc = *suballocItem;
10208  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10209  suballoc.hAllocation = VK_NULL_HANDLE;
10210 
10211  // Update totals.
10212  ++m_FreeCount;
10213  m_SumFreeSize += suballoc.size;
10214 
10215  // Merge with previous and/or next suballocation if it's also free.
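 // Order matters here: a neighbor must be unregistered from the size-sorted
 // vector before its size changes through merging, and the surviving item is
 // re-registered afterwards so the vector stays sorted.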
10216  bool mergeWithNext = false;
10217  bool mergeWithPrev = false;
10218 
10219  VmaSuballocationList::iterator nextItem = suballocItem;
10220  ++nextItem;
10221  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
10222  {
10223  mergeWithNext = true;
10224  }
10225 
10226  VmaSuballocationList::iterator prevItem = suballocItem;
10227  if(suballocItem != m_Suballocations.begin())
10228  {
10229  --prevItem;
10230  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
10231  {
10232  mergeWithPrev = true;
10233  }
10234  }
10235 
10236  if(mergeWithNext)
10237  {
10238  UnregisterFreeSuballocation(nextItem);
10239  MergeFreeWithNext(suballocItem);
10240  }
10241 
10242  if(mergeWithPrev)
10243  {
10244  UnregisterFreeSuballocation(prevItem);
10245  MergeFreeWithNext(prevItem);
10246  RegisterFreeSuballocation(prevItem);
10247  return prevItem;
10248  }
10249  else
10250  {
10251  RegisterFreeSuballocation(suballocItem);
10252  return suballocItem;
10253  }
10254 }
10255 
10256 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
10257 {
10258  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10259  VMA_ASSERT(item->size > 0);
10260 
10261  // You may want to enable this validation at the beginning or at the end of
10262  // this function, depending on what you want to check.
10263  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10264 
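 // Free ranges smaller than VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER are
 // kept only in the suballocation list, not in the size-sorted vector, so
 // tiny fragments don't bloat the binary-searched structure.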
10265  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
10266  {
10267  if(m_FreeSuballocationsBySize.empty())
10268  {
10269  m_FreeSuballocationsBySize.push_back(item);
10270  }
10271  else
10272  {
10273  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
10274  }
10275  }
10276 
10277  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10278 }
10279 
10280 
10281 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
10282 {
10283  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10284  VMA_ASSERT(item->size > 0);
10285 
10286  // You may want to enable this validation at the beginning or at the end of
10287  // this function, depending on what you want to check.
10288  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10289 
10290  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
10291  {
10292  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
10293  m_FreeSuballocationsBySize.data(),
10294  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
10295  item,
10296  VmaSuballocationItemSizeLess());
10297  for(size_t index = it - m_FreeSuballocationsBySize.data();
10298  index < m_FreeSuballocationsBySize.size();
10299  ++index)
10300  {
10301  if(m_FreeSuballocationsBySize[index] == item)
10302  {
10303  VmaVectorRemove(m_FreeSuballocationsBySize, index);
10304  return;
10305  }
10306  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
10307  }
10308  VMA_ASSERT(0 && "Not found.");
10309  }
10310 
10311  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10312 }
10313 
10314 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
10315  VkDeviceSize bufferImageGranularity,
10316  VmaSuballocationType& inOutPrevSuballocType) const
10317 {
10318  if(bufferImageGranularity == 1 || IsEmpty())
10319  {
10320  return false;
10321  }
10322 
10323  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
10324  bool typeConflictFound = false;
10325  for(const auto& suballoc : m_Suballocations)
10326  {
10327  const VmaSuballocationType suballocType = suballoc.type;
10328  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
10329  {
10330  minAlignment = VMA_MIN(minAlignment, suballoc.hAllocation->GetAlignment());
10331  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
10332  {
10333  typeConflictFound = true;
10334  }
10335  inOutPrevSuballocType = suballocType;
10336  }
10337  }
10338 
10339  return typeConflictFound || minAlignment >= bufferImageGranularity;
10340 }
10341 
10342 ////////////////////////////////////////////////////////////////////////////////
10343 // class VmaBlockMetadata_Linear
10344 
10345 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
10346  VmaBlockMetadata(hAllocator),
10347  m_SumFreeSize(0),
10348  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10349  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10350  m_1stVectorIndex(0),
10351  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
10352  m_1stNullItemsBeginCount(0),
10353  m_1stNullItemsMiddleCount(0),
10354  m_2ndNullItemsCount(0)
10355 {
10356 }
10357 
10358 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
10359 {
10360 }
10361 
10362 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
10363 {
10364  VmaBlockMetadata::Init(size);
10365  m_SumFreeSize = size;
10366 }
10367 
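// A note on the layout this class maintains (summarized from the validation
// below): suballocations live in two vectors. The 1st vector occupies
// increasing offsets; the 2nd is either empty, a ring buffer that has wrapped
// around to the beginning of the block in front of the 1st vector
// (SECOND_VECTOR_RING_BUFFER), or a stack growing down from the end of the
// block (SECOND_VECTOR_DOUBLE_STACK). Freed entries remain as null items
// until they can be cheaply trimmed from the vector ends.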
10368 bool VmaBlockMetadata_Linear::Validate() const
10369 {
10370  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10371  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10372 
10373  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
10374  VMA_VALIDATE(!suballocations1st.empty() ||
10375  suballocations2nd.empty() ||
10376  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
10377 
10378  if(!suballocations1st.empty())
10379  {
10380  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
10381  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
10382  // Null item at the end should be just pop_back().
10383  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
10384  }
10385  if(!suballocations2nd.empty())
10386  {
10387  // Null item at the end should be just pop_back().
10388  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
10389  }
10390 
10391  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
10392  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
10393 
10394  VkDeviceSize sumUsedSize = 0;
10395  const size_t suballoc1stCount = suballocations1st.size();
10396  VkDeviceSize offset = VMA_DEBUG_MARGIN;
10397 
10398  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10399  {
10400  const size_t suballoc2ndCount = suballocations2nd.size();
10401  size_t nullItem2ndCount = 0;
10402  for(size_t i = 0; i < suballoc2ndCount; ++i)
10403  {
10404  const VmaSuballocation& suballoc = suballocations2nd[i];
10405  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10406 
10407  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10408  VMA_VALIDATE(suballoc.offset >= offset);
10409 
10410  if(!currFree)
10411  {
10412  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10413  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10414  sumUsedSize += suballoc.size;
10415  }
10416  else
10417  {
10418  ++nullItem2ndCount;
10419  }
10420 
10421  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10422  }
10423 
10424  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10425  }
10426 
10427  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
10428  {
10429  const VmaSuballocation& suballoc = suballocations1st[i];
10430  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
10431  suballoc.hAllocation == VK_NULL_HANDLE);
10432  }
10433 
10434  size_t nullItem1stCount = m_1stNullItemsBeginCount;
10435 
10436  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
10437  {
10438  const VmaSuballocation& suballoc = suballocations1st[i];
10439  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10440 
10441  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10442  VMA_VALIDATE(suballoc.offset >= offset);
10443  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
10444 
10445  if(!currFree)
10446  {
10447  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10448  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10449  sumUsedSize += suballoc.size;
10450  }
10451  else
10452  {
10453  ++nullItem1stCount;
10454  }
10455 
10456  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10457  }
10458  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
10459 
10460  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10461  {
10462  const size_t suballoc2ndCount = suballocations2nd.size();
10463  size_t nullItem2ndCount = 0;
10464  for(size_t i = suballoc2ndCount; i--; )
10465  {
10466  const VmaSuballocation& suballoc = suballocations2nd[i];
10467  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10468 
10469  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10470  VMA_VALIDATE(suballoc.offset >= offset);
10471 
10472  if(!currFree)
10473  {
10474  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10475  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10476  sumUsedSize += suballoc.size;
10477  }
10478  else
10479  {
10480  ++nullItem2ndCount;
10481  }
10482 
10483  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10484  }
10485 
10486  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10487  }
10488 
10489  VMA_VALIDATE(offset <= GetSize());
10490  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
10491 
10492  return true;
10493 }
10494 
10495 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
10496 {
10497  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
10498  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
10499 }
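// Worked example (illustrative numbers, not from the source): with 6 items in
// the 1st vector, of which 2 are leading nulls and 1 is a middle null, and 3
// items in the 2nd vector with 1 null, the live allocation count is
// (6 - (2 + 1)) + (3 - 1) = 5.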
10500 
10501 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
10502 {
10503  const VkDeviceSize size = GetSize();
10504 
10505  /*
10506  We don't consider gaps left inside the allocation vectors by freed allocations,
10507  because they are not suitable for reuse in a linear allocator. We consider only
10508  the space that is available for new allocations.
10509  */
10510  if(IsEmpty())
10511  {
10512  return size;
10513  }
10514 
10515  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10516 
10517  switch(m_2ndVectorMode)
10518  {
10519  case SECOND_VECTOR_EMPTY:
10520  /*
10521  Available space is after end of 1st, as well as before beginning of 1st (which
10522  would make it a ring buffer).
10523  */
10524  {
10525  const size_t suballocations1stCount = suballocations1st.size();
10526  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
10527  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10528  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
10529  return VMA_MAX(
10530  firstSuballoc.offset,
10531  size - (lastSuballoc.offset + lastSuballoc.size));
10532  }
10533  break;
10534 
10535  case SECOND_VECTOR_RING_BUFFER:
10536  /*
10537  Available space is only between end of 2nd and beginning of 1st.
10538  */
10539  {
10540  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10541  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
10542  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
10543  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
10544  }
10545  break;
10546 
10547  case SECOND_VECTOR_DOUBLE_STACK:
10548  /*
10549  Available space is only between end of 1st and top of 2nd.
10550  */
10551  {
10552  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10553  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
10554  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
10555  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
10556  }
10557  break;
10558 
10559  default:
10560  VMA_ASSERT(0);
10561  return 0;
10562  }
10563 }
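// Worked example (illustrative numbers): for a 1000-byte block in
// SECOND_VECTOR_EMPTY mode whose live 1st-vector items span offsets [100, 900),
// the candidates are the 100 bytes before the 1st vector and the
// 1000 - 900 = 100 bytes after it, so the maximum unused range is 100. In
// RING_BUFFER mode only the gap between the end of 2nd and the first live item
// of 1st counts; in DOUBLE_STACK mode only the gap between the end of 1st and
// the top of 2nd counts.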
10564 
10565 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10566 {
10567  const VkDeviceSize size = GetSize();
10568  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10569  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10570  const size_t suballoc1stCount = suballocations1st.size();
10571  const size_t suballoc2ndCount = suballocations2nd.size();
10572 
10573  outInfo.blockCount = 1;
10574  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10575  outInfo.unusedRangeCount = 0;
10576  outInfo.usedBytes = 0;
10577  outInfo.allocationSizeMin = UINT64_MAX;
10578  outInfo.allocationSizeMax = 0;
10579  outInfo.unusedRangeSizeMin = UINT64_MAX;
10580  outInfo.unusedRangeSizeMax = 0;
10581 
10582  VkDeviceSize lastOffset = 0;
10583 
10584  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10585  {
10586  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10587  size_t nextAlloc2ndIndex = 0;
10588  while(lastOffset < freeSpace2ndTo1stEnd)
10589  {
10590  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10591  while(nextAlloc2ndIndex < suballoc2ndCount &&
10592  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10593  {
10594  ++nextAlloc2ndIndex;
10595  }
10596 
10597  // Found non-null allocation.
10598  if(nextAlloc2ndIndex < suballoc2ndCount)
10599  {
10600  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10601 
10602  // 1. Process free space before this allocation.
10603  if(lastOffset < suballoc.offset)
10604  {
10605  // There is free space from lastOffset to suballoc.offset.
10606  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10607  ++outInfo.unusedRangeCount;
10608  outInfo.unusedBytes += unusedRangeSize;
10609  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10610  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10611  }
10612 
10613  // 2. Process this allocation.
10614  // There is allocation with suballoc.offset, suballoc.size.
10615  outInfo.usedBytes += suballoc.size;
10616  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10617  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10618 
10619  // 3. Prepare for next iteration.
10620  lastOffset = suballoc.offset + suballoc.size;
10621  ++nextAlloc2ndIndex;
10622  }
10623  // We are at the end.
10624  else
10625  {
10626  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10627  if(lastOffset < freeSpace2ndTo1stEnd)
10628  {
10629  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10630  ++outInfo.unusedRangeCount;
10631  outInfo.unusedBytes += unusedRangeSize;
10632  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10633  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10634  }
10635 
10636  // End of loop.
10637  lastOffset = freeSpace2ndTo1stEnd;
10638  }
10639  }
10640  }
10641 
10642  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10643  const VkDeviceSize freeSpace1stTo2ndEnd =
10644  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10645  while(lastOffset < freeSpace1stTo2ndEnd)
10646  {
10647  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10648  while(nextAlloc1stIndex < suballoc1stCount &&
10649  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10650  {
10651  ++nextAlloc1stIndex;
10652  }
10653 
10654  // Found non-null allocation.
10655  if(nextAlloc1stIndex < suballoc1stCount)
10656  {
10657  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10658 
10659  // 1. Process free space before this allocation.
10660  if(lastOffset < suballoc.offset)
10661  {
10662  // There is free space from lastOffset to suballoc.offset.
10663  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10664  ++outInfo.unusedRangeCount;
10665  outInfo.unusedBytes += unusedRangeSize;
10666  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10667  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10668  }
10669 
10670  // 2. Process this allocation.
10671  // There is allocation with suballoc.offset, suballoc.size.
10672  outInfo.usedBytes += suballoc.size;
10673  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10674  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10675 
10676  // 3. Prepare for next iteration.
10677  lastOffset = suballoc.offset + suballoc.size;
10678  ++nextAlloc1stIndex;
10679  }
10680  // We are at the end.
10681  else
10682  {
10683  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10684  if(lastOffset < freeSpace1stTo2ndEnd)
10685  {
10686  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10687  ++outInfo.unusedRangeCount;
10688  outInfo.unusedBytes += unusedRangeSize;
10689  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10690  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10691  }
10692 
10693  // End of loop.
10694  lastOffset = freeSpace1stTo2ndEnd;
10695  }
10696  }
10697 
10698  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10699  {
10700  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10701  while(lastOffset < size)
10702  {
10703  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10704  while(nextAlloc2ndIndex != SIZE_MAX &&
10705  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10706  {
10707  --nextAlloc2ndIndex;
10708  }
10709 
10710  // Found non-null allocation.
10711  if(nextAlloc2ndIndex != SIZE_MAX)
10712  {
10713  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10714 
10715  // 1. Process free space before this allocation.
10716  if(lastOffset < suballoc.offset)
10717  {
10718  // There is free space from lastOffset to suballoc.offset.
10719  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10720  ++outInfo.unusedRangeCount;
10721  outInfo.unusedBytes += unusedRangeSize;
10722  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10723  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10724  }
10725 
10726  // 2. Process this allocation.
10727  // There is allocation with suballoc.offset, suballoc.size.
10728  outInfo.usedBytes += suballoc.size;
10729  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10730  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10731 
10732  // 3. Prepare for next iteration.
10733  lastOffset = suballoc.offset + suballoc.size;
10734  --nextAlloc2ndIndex;
10735  }
10736  // We are at the end.
10737  else
10738  {
10739  // There is free space from lastOffset to size.
10740  if(lastOffset < size)
10741  {
10742  const VkDeviceSize unusedRangeSize = size - lastOffset;
10743  ++outInfo.unusedRangeCount;
10744  outInfo.unusedBytes += unusedRangeSize;
10745  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10746  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10747  }
10748 
10749  // End of loop.
10750  lastOffset = size;
10751  }
10752  }
10753  }
10754 
10755  outInfo.unusedBytes = size - outInfo.usedBytes;
10756 }
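// Note: the scan above visits the block as up to three contiguous regions in
// address order (the 2nd vector in ring-buffer mode, then the 1st vector, then
// the 2nd vector in double-stack mode), so every byte of the block is
// classified exactly once as used or unused.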
10757 
10758 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10759 {
10760  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10761  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10762  const VkDeviceSize size = GetSize();
10763  const size_t suballoc1stCount = suballocations1st.size();
10764  const size_t suballoc2ndCount = suballocations2nd.size();
10765 
10766  inoutStats.size += size;
10767 
10768  VkDeviceSize lastOffset = 0;
10769 
10770  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10771  {
10772  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10773  size_t nextAlloc2ndIndex = 0;
10774  while(lastOffset < freeSpace2ndTo1stEnd)
10775  {
10776  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10777  while(nextAlloc2ndIndex < suballoc2ndCount &&
10778  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10779  {
10780  ++nextAlloc2ndIndex;
10781  }
10782 
10783  // Found non-null allocation.
10784  if(nextAlloc2ndIndex < suballoc2ndCount)
10785  {
10786  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10787 
10788  // 1. Process free space before this allocation.
10789  if(lastOffset < suballoc.offset)
10790  {
10791  // There is free space from lastOffset to suballoc.offset.
10792  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10793  inoutStats.unusedSize += unusedRangeSize;
10794  ++inoutStats.unusedRangeCount;
10795  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10796  }
10797 
10798  // 2. Process this allocation.
10799  // There is allocation with suballoc.offset, suballoc.size.
10800  ++inoutStats.allocationCount;
10801 
10802  // 3. Prepare for next iteration.
10803  lastOffset = suballoc.offset + suballoc.size;
10804  ++nextAlloc2ndIndex;
10805  }
10806  // We are at the end.
10807  else
10808  {
10809  if(lastOffset < freeSpace2ndTo1stEnd)
10810  {
10811  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10812  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10813  inoutStats.unusedSize += unusedRangeSize;
10814  ++inoutStats.unusedRangeCount;
10815  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10816  }
10817 
10818  // End of loop.
10819  lastOffset = freeSpace2ndTo1stEnd;
10820  }
10821  }
10822  }
10823 
10824  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10825  const VkDeviceSize freeSpace1stTo2ndEnd =
10826  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10827  while(lastOffset < freeSpace1stTo2ndEnd)
10828  {
10829  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10830  while(nextAlloc1stIndex < suballoc1stCount &&
10831  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10832  {
10833  ++nextAlloc1stIndex;
10834  }
10835 
10836  // Found non-null allocation.
10837  if(nextAlloc1stIndex < suballoc1stCount)
10838  {
10839  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10840 
10841  // 1. Process free space before this allocation.
10842  if(lastOffset < suballoc.offset)
10843  {
10844  // There is free space from lastOffset to suballoc.offset.
10845  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10846  inoutStats.unusedSize += unusedRangeSize;
10847  ++inoutStats.unusedRangeCount;
10848  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10849  }
10850 
10851  // 2. Process this allocation.
10852  // There is allocation with suballoc.offset, suballoc.size.
10853  ++inoutStats.allocationCount;
10854 
10855  // 3. Prepare for next iteration.
10856  lastOffset = suballoc.offset + suballoc.size;
10857  ++nextAlloc1stIndex;
10858  }
10859  // We are at the end.
10860  else
10861  {
10862  if(lastOffset < freeSpace1stTo2ndEnd)
10863  {
10864  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10865  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10866  inoutStats.unusedSize += unusedRangeSize;
10867  ++inoutStats.unusedRangeCount;
10868  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10869  }
10870 
10871  // End of loop.
10872  lastOffset = freeSpace1stTo2ndEnd;
10873  }
10874  }
10875 
10876  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10877  {
10878  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10879  while(lastOffset < size)
10880  {
10881  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10882  while(nextAlloc2ndIndex != SIZE_MAX &&
10883  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10884  {
10885  --nextAlloc2ndIndex;
10886  }
10887 
10888  // Found non-null allocation.
10889  if(nextAlloc2ndIndex != SIZE_MAX)
10890  {
10891  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10892 
10893  // 1. Process free space before this allocation.
10894  if(lastOffset < suballoc.offset)
10895  {
10896  // There is free space from lastOffset to suballoc.offset.
10897  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10898  inoutStats.unusedSize += unusedRangeSize;
10899  ++inoutStats.unusedRangeCount;
10900  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10901  }
10902 
10903  // 2. Process this allocation.
10904  // There is allocation with suballoc.offset, suballoc.size.
10905  ++inoutStats.allocationCount;
10906 
10907  // 3. Prepare for next iteration.
10908  lastOffset = suballoc.offset + suballoc.size;
10909  --nextAlloc2ndIndex;
10910  }
10911  // We are at the end.
10912  else
10913  {
10914  if(lastOffset < size)
10915  {
10916  // There is free space from lastOffset to size.
10917  const VkDeviceSize unusedRangeSize = size - lastOffset;
10918  inoutStats.unusedSize += unusedRangeSize;
10919  ++inoutStats.unusedRangeCount;
10920  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10921  }
10922 
10923  // End of loop.
10924  lastOffset = size;
10925  }
10926  }
10927  }
10928 }
10929 
10930 #if VMA_STATS_STRING_ENABLED
10931 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10932 {
10933  const VkDeviceSize size = GetSize();
10934  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10935  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10936  const size_t suballoc1stCount = suballocations1st.size();
10937  const size_t suballoc2ndCount = suballocations2nd.size();
10938 
10939  // FIRST PASS
10940 
10941  size_t unusedRangeCount = 0;
10942  VkDeviceSize usedBytes = 0;
10943 
10944  VkDeviceSize lastOffset = 0;
10945 
10946  size_t alloc2ndCount = 0;
10947  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10948  {
10949  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10950  size_t nextAlloc2ndIndex = 0;
10951  while(lastOffset < freeSpace2ndTo1stEnd)
10952  {
10953  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10954  while(nextAlloc2ndIndex < suballoc2ndCount &&
10955  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10956  {
10957  ++nextAlloc2ndIndex;
10958  }
10959 
10960  // Found non-null allocation.
10961  if(nextAlloc2ndIndex < suballoc2ndCount)
10962  {
10963  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10964 
10965  // 1. Process free space before this allocation.
10966  if(lastOffset < suballoc.offset)
10967  {
10968  // There is free space from lastOffset to suballoc.offset.
10969  ++unusedRangeCount;
10970  }
10971 
10972  // 2. Process this allocation.
10973  // There is allocation with suballoc.offset, suballoc.size.
10974  ++alloc2ndCount;
10975  usedBytes += suballoc.size;
10976 
10977  // 3. Prepare for next iteration.
10978  lastOffset = suballoc.offset + suballoc.size;
10979  ++nextAlloc2ndIndex;
10980  }
10981  // We are at the end.
10982  else
10983  {
10984  if(lastOffset < freeSpace2ndTo1stEnd)
10985  {
10986  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10987  ++unusedRangeCount;
10988  }
10989 
10990  // End of loop.
10991  lastOffset = freeSpace2ndTo1stEnd;
10992  }
10993  }
10994  }
10995 
10996  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10997  size_t alloc1stCount = 0;
10998  const VkDeviceSize freeSpace1stTo2ndEnd =
10999  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
11000  while(lastOffset < freeSpace1stTo2ndEnd)
11001  {
11002  // Find next non-null allocation or move nextAlloc1stIndex to the end.
11003  while(nextAlloc1stIndex < suballoc1stCount &&
11004  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
11005  {
11006  ++nextAlloc1stIndex;
11007  }
11008 
11009  // Found non-null allocation.
11010  if(nextAlloc1stIndex < suballoc1stCount)
11011  {
11012  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
11013 
11014  // 1. Process free space before this allocation.
11015  if(lastOffset < suballoc.offset)
11016  {
11017  // There is free space from lastOffset to suballoc.offset.
11018  ++unusedRangeCount;
11019  }
11020 
11021  // 2. Process this allocation.
11022  // There is allocation with suballoc.offset, suballoc.size.
11023  ++alloc1stCount;
11024  usedBytes += suballoc.size;
11025 
11026  // 3. Prepare for next iteration.
11027  lastOffset = suballoc.offset + suballoc.size;
11028  ++nextAlloc1stIndex;
11029  }
11030  // We are at the end.
11031  else
11032  {
11033  if(lastOffset < freeSpace1stTo2ndEnd)
11034  {
11035  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
11036  ++unusedRangeCount;
11037  }
11038 
11039  // End of loop.
11040  lastOffset = freeSpace1stTo2ndEnd;
11041  }
11042  }
11043 
11044  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11045  {
11046  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
11047  while(lastOffset < size)
11048  {
11049  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11050  while(nextAlloc2ndIndex != SIZE_MAX &&
11051  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11052  {
11053  --nextAlloc2ndIndex;
11054  }
11055 
11056  // Found non-null allocation.
11057  if(nextAlloc2ndIndex != SIZE_MAX)
11058  {
11059  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11060 
11061  // 1. Process free space before this allocation.
11062  if(lastOffset < suballoc.offset)
11063  {
11064  // There is free space from lastOffset to suballoc.offset.
11065  ++unusedRangeCount;
11066  }
11067 
11068  // 2. Process this allocation.
11069  // There is allocation with suballoc.offset, suballoc.size.
11070  ++alloc2ndCount;
11071  usedBytes += suballoc.size;
11072 
11073  // 3. Prepare for next iteration.
11074  lastOffset = suballoc.offset + suballoc.size;
11075  --nextAlloc2ndIndex;
11076  }
11077  // We are at the end.
11078  else
11079  {
11080  if(lastOffset < size)
11081  {
11082  // There is free space from lastOffset to size.
11083  ++unusedRangeCount;
11084  }
11085 
11086  // End of loop.
11087  lastOffset = size;
11088  }
11089  }
11090  }
11091 
11092  const VkDeviceSize unusedBytes = size - usedBytes;
11093  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
11094 
11095  // SECOND PASS
11096  lastOffset = 0;
11097 
11098  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11099  {
11100  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
11101  size_t nextAlloc2ndIndex = 0;
11102  while(lastOffset < freeSpace2ndTo1stEnd)
11103  {
11104  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11105  while(nextAlloc2ndIndex < suballoc2ndCount &&
11106  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11107  {
11108  ++nextAlloc2ndIndex;
11109  }
11110 
11111  // Found non-null allocation.
11112  if(nextAlloc2ndIndex < suballoc2ndCount)
11113  {
11114  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11115 
11116  // 1. Process free space before this allocation.
11117  if(lastOffset < suballoc.offset)
11118  {
11119  // There is free space from lastOffset to suballoc.offset.
11120  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11121  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11122  }
11123 
11124  // 2. Process this allocation.
11125  // There is allocation with suballoc.offset, suballoc.size.
11126  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11127 
11128  // 3. Prepare for next iteration.
11129  lastOffset = suballoc.offset + suballoc.size;
11130  ++nextAlloc2ndIndex;
11131  }
11132  // We are at the end.
11133  else
11134  {
11135  if(lastOffset < freeSpace2ndTo1stEnd)
11136  {
11137  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
11138  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
11139  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11140  }
11141 
11142  // End of loop.
11143  lastOffset = freeSpace2ndTo1stEnd;
11144  }
11145  }
11146  }
11147 
11148  nextAlloc1stIndex = m_1stNullItemsBeginCount;
11149  while(lastOffset < freeSpace1stTo2ndEnd)
11150  {
11151  // Find next non-null allocation or move nextAlloc1stIndex to the end.
11152  while(nextAlloc1stIndex < suballoc1stCount &&
11153  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
11154  {
11155  ++nextAlloc1stIndex;
11156  }
11157 
11158  // Found non-null allocation.
11159  if(nextAlloc1stIndex < suballoc1stCount)
11160  {
11161  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
11162 
11163  // 1. Process free space before this allocation.
11164  if(lastOffset < suballoc.offset)
11165  {
11166  // There is free space from lastOffset to suballoc.offset.
11167  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11168  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11169  }
11170 
11171  // 2. Process this allocation.
11172  // There is allocation with suballoc.offset, suballoc.size.
11173  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11174 
11175  // 3. Prepare for next iteration.
11176  lastOffset = suballoc.offset + suballoc.size;
11177  ++nextAlloc1stIndex;
11178  }
11179  // We are at the end.
11180  else
11181  {
11182  if(lastOffset < freeSpace1stTo2ndEnd)
11183  {
11184  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
11185  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
11186  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11187  }
11188 
11189  // End of loop.
11190  lastOffset = freeSpace1stTo2ndEnd;
11191  }
11192  }
11193 
11194  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11195  {
11196  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
11197  while(lastOffset < size)
11198  {
11199  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11200  while(nextAlloc2ndIndex != SIZE_MAX &&
11201  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11202  {
11203  --nextAlloc2ndIndex;
11204  }
11205 
11206  // Found non-null allocation.
11207  if(nextAlloc2ndIndex != SIZE_MAX)
11208  {
11209  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11210 
11211  // 1. Process free space before this allocation.
11212  if(lastOffset < suballoc.offset)
11213  {
11214  // There is free space from lastOffset to suballoc.offset.
11215  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11216  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11217  }
11218 
11219  // 2. Process this allocation.
11220  // There is allocation with suballoc.offset, suballoc.size.
11221  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11222 
11223  // 3. Prepare for next iteration.
11224  lastOffset = suballoc.offset + suballoc.size;
11225  --nextAlloc2ndIndex;
11226  }
11227  // We are at the end.
11228  else
11229  {
11230  if(lastOffset < size)
11231  {
11232  // There is free space from lastOffset to size.
11233  const VkDeviceSize unusedRangeSize = size - lastOffset;
11234  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11235  }
11236 
11237  // End of loop.
11238  lastOffset = size;
11239  }
11240  }
11241  }
11242 
11243  PrintDetailedMap_End(json);
11244 }
11245 #endif // #if VMA_STATS_STRING_ENABLED
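// PrintDetailedMap() above is deliberately two-pass: the first pass only
// counts allocations and unused ranges so that PrintDetailedMap_Begin() can
// emit the totals before any entries, and the second pass walks the same
// regions again to write the individual allocations and unused ranges.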
11246 
11247 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
11248  uint32_t currentFrameIndex,
11249  uint32_t frameInUseCount,
11250  VkDeviceSize bufferImageGranularity,
11251  VkDeviceSize allocSize,
11252  VkDeviceSize allocAlignment,
11253  bool upperAddress,
11254  VmaSuballocationType allocType,
11255  bool canMakeOtherLost,
11256  uint32_t strategy,
11257  VmaAllocationRequest* pAllocationRequest)
11258 {
11259  VMA_ASSERT(allocSize > 0);
11260  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
11261  VMA_ASSERT(pAllocationRequest != VMA_NULL);
11262  VMA_HEAVY_ASSERT(Validate());
11263  return upperAddress ?
11264  CreateAllocationRequest_UpperAddress(
11265  currentFrameIndex, frameInUseCount, bufferImageGranularity,
11266  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
11267  CreateAllocationRequest_LowerAddress(
11268  currentFrameIndex, frameInUseCount, bufferImageGranularity,
11269  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
11270 }
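// The upperAddress path corresponds to allocations requested with
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT: they are placed at the end of the
// block, using the 2nd vector as a stack growing downward, while lower-address
// requests extend the 1st vector or wrap around as a ring buffer.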
11271 
11272 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
11273  uint32_t currentFrameIndex,
11274  uint32_t frameInUseCount,
11275  VkDeviceSize bufferImageGranularity,
11276  VkDeviceSize allocSize,
11277  VkDeviceSize allocAlignment,
11278  VmaSuballocationType allocType,
11279  bool canMakeOtherLost,
11280  uint32_t strategy,
11281  VmaAllocationRequest* pAllocationRequest)
11282 {
11283  const VkDeviceSize size = GetSize();
11284  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11285  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11286 
11287  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11288  {
11289  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
11290  return false;
11291  }
11292 
11293  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
11294  if(allocSize > size)
11295  {
11296  return false;
11297  }
11298  VkDeviceSize resultBaseOffset = size - allocSize;
11299  if(!suballocations2nd.empty())
11300  {
11301  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11302  resultBaseOffset = lastSuballoc.offset - allocSize;
11303  if(allocSize > lastSuballoc.offset)
11304  {
11305  return false;
11306  }
11307  }
11308 
11309  // Start from offset equal to end of free space.
11310  VkDeviceSize resultOffset = resultBaseOffset;
11311 
11312  // Apply VMA_DEBUG_MARGIN at the end.
11313  if(VMA_DEBUG_MARGIN > 0)
11314  {
11315  if(resultOffset < VMA_DEBUG_MARGIN)
11316  {
11317  return false;
11318  }
11319  resultOffset -= VMA_DEBUG_MARGIN;
11320  }
11321 
11322  // Apply alignment.
11323  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
11324 
11325  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
11326  // Make bigger alignment if necessary.
11327  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
11328  {
11329  bool bufferImageGranularityConflict = false;
11330  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11331  {
11332  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11333  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11334  {
11335  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
11336  {
11337  bufferImageGranularityConflict = true;
11338  break;
11339  }
11340  }
11341  else
11342  // Already on previous page.
11343  break;
11344  }
11345  if(bufferImageGranularityConflict)
11346  {
11347  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
11348  }
11349  }
11350 
11351  // There is enough free space.
11352  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
11353  suballocations1st.back().offset + suballocations1st.back().size :
11354  0;
11355  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
11356  {
11357  // Check previous suballocations for BufferImageGranularity conflicts.
11358  // If conflict exists, allocation cannot be made here.
11359  if(bufferImageGranularity > 1)
11360  {
11361  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11362  {
11363  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11364  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11365  {
11366  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
11367  {
11368  return false;
11369  }
11370  }
11371  else
11372  {
11373  // Already on next page.
11374  break;
11375  }
11376  }
11377  }
11378 
11379  // All tests passed: Success.
11380  pAllocationRequest->offset = resultOffset;
11381  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
11382  pAllocationRequest->sumItemSize = 0;
11383  // pAllocationRequest->item unused.
11384  pAllocationRequest->itemsToMakeLostCount = 0;
11385  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
11386  return true;
11387  }
11388 
11389  return false;
11390 }
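// Worked example (illustrative numbers): in a 1024-byte block with an empty
// 2nd vector, a 100-byte request with alignment 64 and VMA_DEBUG_MARGIN == 0
// starts from resultBaseOffset = 1024 - 100 = 924 and is aligned down to
// resultOffset = 896; the request succeeds as long as the 1st vector ends at
// or below offset 896.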
11391 
11392 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
11393  uint32_t currentFrameIndex,
11394  uint32_t frameInUseCount,
11395  VkDeviceSize bufferImageGranularity,
11396  VkDeviceSize allocSize,
11397  VkDeviceSize allocAlignment,
11398  VmaSuballocationType allocType,
11399  bool canMakeOtherLost,
11400  uint32_t strategy,
11401  VmaAllocationRequest* pAllocationRequest)
11402 {
11403  const VkDeviceSize size = GetSize();
11404  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11405  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11406 
11407  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11408  {
11409  // Try to allocate at the end of 1st vector.
11410 
11411  VkDeviceSize resultBaseOffset = 0;
11412  if(!suballocations1st.empty())
11413  {
11414  const VmaSuballocation& lastSuballoc = suballocations1st.back();
11415  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11416  }
11417 
11418  // Start from offset equal to beginning of free space.
11419  VkDeviceSize resultOffset = resultBaseOffset;
11420 
11421  // Apply VMA_DEBUG_MARGIN at the beginning.
11422  if(VMA_DEBUG_MARGIN > 0)
11423  {
11424  resultOffset += VMA_DEBUG_MARGIN;
11425  }
11426 
11427  // Apply alignment.
11428  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11429 
11430  // Check previous suballocations for BufferImageGranularity conflicts.
11431  // Make bigger alignment if necessary.
11432  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
11433  {
11434  bool bufferImageGranularityConflict = false;
11435  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11436  {
11437  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11438  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11439  {
11440  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11441  {
11442  bufferImageGranularityConflict = true;
11443  break;
11444  }
11445  }
11446  else
11447  // Already on previous page.
11448  break;
11449  }
11450  if(bufferImageGranularityConflict)
11451  {
11452  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11453  }
11454  }
11455 
11456  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
11457  suballocations2nd.back().offset : size;
11458 
11459  // There is enough free space at the end after alignment.
11460  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
11461  {
11462  // Check next suballocations for BufferImageGranularity conflicts.
11463  // If conflict exists, allocation cannot be made here.
11464  if((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11465  {
11466  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11467  {
11468  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11469  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11470  {
11471  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11472  {
11473  return false;
11474  }
11475  }
11476  else
11477  {
11478  // Already on previous page.
11479  break;
11480  }
11481  }
11482  }
11483 
11484  // All tests passed: Success.
11485  pAllocationRequest->offset = resultOffset;
11486  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
11487  pAllocationRequest->sumItemSize = 0;
11488  // pAllocationRequest->item, customData unused.
11489  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
11490  pAllocationRequest->itemsToMakeLostCount = 0;
11491  return true;
11492  }
11493  }
11494 
11495  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
11496  // beginning of 1st vector as the end of free space.
11497  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11498  {
11499  VMA_ASSERT(!suballocations1st.empty());
11500 
11501  VkDeviceSize resultBaseOffset = 0;
11502  if(!suballocations2nd.empty())
11503  {
11504  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11505  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11506  }
11507 
11508  // Start from offset equal to beginning of free space.
11509  VkDeviceSize resultOffset = resultBaseOffset;
11510 
11511  // Apply VMA_DEBUG_MARGIN at the beginning.
11512  if(VMA_DEBUG_MARGIN > 0)
11513  {
11514  resultOffset += VMA_DEBUG_MARGIN;
11515  }
11516 
11517  // Apply alignment.
11518  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11519 
11520  // Check previous suballocations for BufferImageGranularity conflicts.
11521  // Make bigger alignment if necessary.
11522  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
11523  {
11524  bool bufferImageGranularityConflict = false;
11525  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
11526  {
11527  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
11528  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11529  {
11530  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11531  {
11532  bufferImageGranularityConflict = true;
11533  break;
11534  }
11535  }
11536  else
11537  // Already on previous page.
11538  break;
11539  }
11540  if(bufferImageGranularityConflict)
11541  {
11542  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11543  }
11544  }
11545 
11546  pAllocationRequest->itemsToMakeLostCount = 0;
11547  pAllocationRequest->sumItemSize = 0;
11548  size_t index1st = m_1stNullItemsBeginCount;
11549 
11550  if(canMakeOtherLost)
11551  {
11552  while(index1st < suballocations1st.size() &&
11553  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
11554  {
11555  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
11556  const VmaSuballocation& suballoc = suballocations1st[index1st];
11557  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11558  {
11559  // No problem.
11560  }
11561  else
11562  {
11563  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11564  if(suballoc.hAllocation->CanBecomeLost() &&
11565  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11566  {
11567  ++pAllocationRequest->itemsToMakeLostCount;
11568  pAllocationRequest->sumItemSize += suballoc.size;
11569  }
11570  else
11571  {
11572  return false;
11573  }
11574  }
11575  ++index1st;
11576  }
11577 
11578  // Check next suballocations for BufferImageGranularity conflicts.
11579  // If conflict exists, we must mark more allocations lost or fail.
11580  if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
11581  {
11582  while(index1st < suballocations1st.size())
11583  {
11584  const VmaSuballocation& suballoc = suballocations1st[index1st];
11585  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11586  {
11587  if(suballoc.hAllocation != VK_NULL_HANDLE)
11588  {
11589  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11590  if(suballoc.hAllocation->CanBecomeLost() &&
11591  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11592  {
11593  ++pAllocationRequest->itemsToMakeLostCount;
11594  pAllocationRequest->sumItemSize += suballoc.size;
11595  }
11596  else
11597  {
11598  return false;
11599  }
11600  }
11601  }
11602  else
11603  {
11604  // Already on next page.
11605  break;
11606  }
11607  ++index1st;
11608  }
11609  }
11610 
11611  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
11612  if(index1st == suballocations1st.size() &&
11613  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11614  {
11615  // TODO: Known limitation: wrapping around in this case is not implemented yet, so the allocation fails.
11616  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11617  }
11618  }
11619 
11620  // There is enough free space at the end after alignment.
11621  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11622  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11623  {
11624  // Check next suballocations for BufferImageGranularity conflicts.
11625  // If conflict exists, allocation cannot be made here.
11626  if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
11627  {
11628  for(size_t nextSuballocIndex = index1st;
11629  nextSuballocIndex < suballocations1st.size();
11630  nextSuballocIndex++)
11631  {
11632  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11633  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11634  {
11635  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11636  {
11637  return false;
11638  }
11639  }
11640  else
11641  {
11642  // Already on next page.
11643  break;
11644  }
11645  }
11646  }
11647 
11648  // All tests passed: Success.
11649  pAllocationRequest->offset = resultOffset;
11650  pAllocationRequest->sumFreeSize =
11651  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11652  - resultBaseOffset
11653  - pAllocationRequest->sumItemSize;
11654  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11655  // pAllocationRequest->item, customData unused.
11656  return true;
11657  }
11658  }
11659 
11660  return false;
11661 }
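// Worked example (illustrative numbers): in a 1000-byte block whose only live
// 1st-vector item occupies [600, 900), a 150-byte request with alignment 1 and
// no debug margin fails at the end of 1st (900 + 150 > 1000), so the second
// branch tries the wrapped-around position: resultOffset = 0 and
// 0 + 150 <= 600, which succeeds and later switches the block into
// SECOND_VECTOR_RING_BUFFER mode when Alloc() records the request.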
11662 
11663 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11664  uint32_t currentFrameIndex,
11665  uint32_t frameInUseCount,
11666  VmaAllocationRequest* pAllocationRequest)
11667 {
11668  if(pAllocationRequest->itemsToMakeLostCount == 0)
11669  {
11670  return true;
11671  }
11672 
11673  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11674 
11675  // We always start from 1st.
11676  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11677  size_t index = m_1stNullItemsBeginCount;
11678  size_t madeLostCount = 0;
11679  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11680  {
11681  if(index == suballocations->size())
11682  {
11683  index = 0;
11684  // If we get to the end of 1st, we wrap around to the beginning of 2nd (ring buffer) or back to the beginning of 1st.
11685  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11686  {
11687  suballocations = &AccessSuballocations2nd();
11688  }
11689  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11690  // suballocations continues pointing at AccessSuballocations1st().
11691  VMA_ASSERT(!suballocations->empty());
11692  }
11693  VmaSuballocation& suballoc = (*suballocations)[index];
11694  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11695  {
11696  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11697  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11698  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11699  {
11700  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11701  suballoc.hAllocation = VK_NULL_HANDLE;
11702  m_SumFreeSize += suballoc.size;
11703  if(suballocations == &AccessSuballocations1st())
11704  {
11705  ++m_1stNullItemsMiddleCount;
11706  }
11707  else
11708  {
11709  ++m_2ndNullItemsCount;
11710  }
11711  ++madeLostCount;
11712  }
11713  else
11714  {
11715  return false;
11716  }
11717  }
11718  ++index;
11719  }
11720 
11721  CleanupAfterFree();
11722  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11723 
11724  return true;
11725 }
11726 
11727 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11728 {
11729  uint32_t lostAllocationCount = 0;
11730 
11731  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11732  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11733  {
11734  VmaSuballocation& suballoc = suballocations1st[i];
11735  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11736  suballoc.hAllocation->CanBecomeLost() &&
11737  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11738  {
11739  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11740  suballoc.hAllocation = VK_NULL_HANDLE;
11741  ++m_1stNullItemsMiddleCount;
11742  m_SumFreeSize += suballoc.size;
11743  ++lostAllocationCount;
11744  }
11745  }
11746 
11747  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11748  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11749  {
11750  VmaSuballocation& suballoc = suballocations2nd[i];
11751  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11752  suballoc.hAllocation->CanBecomeLost() &&
11753  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11754  {
11755  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11756  suballoc.hAllocation = VK_NULL_HANDLE;
11757  ++m_2ndNullItemsCount;
11758  m_SumFreeSize += suballoc.size;
11759  ++lostAllocationCount;
11760  }
11761  }
11762 
11763  if(lostAllocationCount)
11764  {
11765  CleanupAfterFree();
11766  }
11767 
11768  return lostAllocationCount;
11769 }
11770 
11771 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11772 {
11773  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11774  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11775  {
11776  const VmaSuballocation& suballoc = suballocations1st[i];
11777  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11778  {
11779  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11780  {
11781  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11782  return VK_ERROR_VALIDATION_FAILED_EXT;
11783  }
11784  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11785  {
11786  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11787  return VK_ERROR_VALIDATION_FAILED_EXT;
11788  }
11789  }
11790  }
11791 
11792  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11793  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11794  {
11795  const VmaSuballocation& suballoc = suballocations2nd[i];
11796  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11797  {
11798  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11799  {
11800  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11801  return VK_ERROR_VALIDATION_FAILED_EXT;
11802  }
11803  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11804  {
11805  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11806  return VK_ERROR_VALIDATION_FAILED_EXT;
11807  }
11808  }
11809  }
11810 
11811  return VK_SUCCESS;
11812 }
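// Layout assumed by the checks above: when VMA_DEBUG_MARGIN > 0 and
// VMA_DEBUG_DETECT_CORRUPTION is enabled, every allocation at
// [offset, offset + size) is bracketed by margin regions whose first bytes
// hold the corruption-detection magic value, so a failed
// VmaValidateMagicValue() just before or just after an allocation pinpoints an
// out-of-bounds write.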
11813 
11814 void VmaBlockMetadata_Linear::Alloc(
11815  const VmaAllocationRequest& request,
11816  VmaSuballocationType type,
11817  VkDeviceSize allocSize,
11818  VmaAllocation hAllocation)
11819 {
11820  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11821 
11822  switch(request.type)
11823  {
11824  case VmaAllocationRequestType::UpperAddress:
11825  {
11826  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11827  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11828  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11829  suballocations2nd.push_back(newSuballoc);
11830  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11831  }
11832  break;
11833  case VmaAllocationRequestType::EndOf1st:
11834  {
11835  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11836 
11837  VMA_ASSERT(suballocations1st.empty() ||
11838  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11839  // Check if it fits before the end of the block.
11840  VMA_ASSERT(request.offset + allocSize <= GetSize());
11841 
11842  suballocations1st.push_back(newSuballoc);
11843  }
11844  break;
11845  case VmaAllocationRequestType::EndOf2nd:
11846  {
11847  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11848  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11849  VMA_ASSERT(!suballocations1st.empty() &&
11850  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11851  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11852 
11853  switch(m_2ndVectorMode)
11854  {
11855  case SECOND_VECTOR_EMPTY:
11856  // First allocation from second part ring buffer.
11857  VMA_ASSERT(suballocations2nd.empty());
11858  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11859  break;
11860  case SECOND_VECTOR_RING_BUFFER:
11861  // 2-part ring buffer is already started.
11862  VMA_ASSERT(!suballocations2nd.empty());
11863  break;
11864  case SECOND_VECTOR_DOUBLE_STACK:
11865  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11866  break;
11867  default:
11868  VMA_ASSERT(0);
11869  }
11870 
11871  suballocations2nd.push_back(newSuballoc);
11872  }
11873  break;
11874  default:
11875  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11876  }
11877 
11878  m_SumFreeSize -= newSuballoc.size;
11879 }
11880 
11881 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11882 {
11883  FreeAtOffset(allocation->GetOffset());
11884 }
11885 
11886 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11887 {
11888  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11889  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11890 
11891  if(!suballocations1st.empty())
11892  {
11893  // Freed item is the first live allocation: mark it as empty and extend the null-item prefix.
11894  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11895  if(firstSuballoc.offset == offset)
11896  {
11897  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11898  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11899  m_SumFreeSize += firstSuballoc.size;
11900  ++m_1stNullItemsBeginCount;
11901  CleanupAfterFree();
11902  return;
11903  }
11904  }
11905 
11906  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11907  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11908  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11909  {
11910  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11911  if(lastSuballoc.offset == offset)
11912  {
11913  m_SumFreeSize += lastSuballoc.size;
11914  suballocations2nd.pop_back();
11915  CleanupAfterFree();
11916  return;
11917  }
11918  }
11919  // Last allocation in 1st vector.
11920  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11921  {
11922  VmaSuballocation& lastSuballoc = suballocations1st.back();
11923  if(lastSuballoc.offset == offset)
11924  {
11925  m_SumFreeSize += lastSuballoc.size;
11926  suballocations1st.pop_back();
11927  CleanupAfterFree();
11928  return;
11929  }
11930  }
11931 
11932  // Item from the middle of 1st vector.
11933  {
11934  VmaSuballocation refSuballoc;
11935  refSuballoc.offset = offset;
11936  // Rest of the members stay uninitialized intentionally for better performance.
11937  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11938  suballocations1st.begin() + m_1stNullItemsBeginCount,
11939  suballocations1st.end(),
11940  refSuballoc,
11941  VmaSuballocationOffsetLess());
11942  if(it != suballocations1st.end())
11943  {
11944  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11945  it->hAllocation = VK_NULL_HANDLE;
11946  ++m_1stNullItemsMiddleCount;
11947  m_SumFreeSize += it->size;
11948  CleanupAfterFree();
11949  return;
11950  }
11951  }
11952 
11953  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11954  {
11955  // Item from the middle of 2nd vector.
11956  VmaSuballocation refSuballoc;
11957  refSuballoc.offset = offset;
11958  // Rest of the members stay uninitialized intentionally for better performance.
11959  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11960  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11961  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11962  if(it != suballocations2nd.end())
11963  {
11964  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11965  it->hAllocation = VK_NULL_HANDLE;
11966  ++m_2ndNullItemsCount;
11967  m_SumFreeSize += it->size;
11968  CleanupAfterFree();
11969  return;
11970  }
11971  }
11972 
11973  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11974 }
11975 
11976 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11977 {
11978  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11979  const size_t suballocCount = AccessSuballocations1st().size();
11980  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11981 }
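// Worked example (illustrative numbers): with 40 items of which 25 are null,
// 25 * 2 = 50 >= (40 - 25) * 3 = 45, so the 1st vector gets compacted; with
// only 10 nulls, 20 < 90 and the null items are kept, since compaction would
// move live items without reclaiming much.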
11982 
11983 void VmaBlockMetadata_Linear::CleanupAfterFree()
11984 {
11985  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11986  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11987 
11988  if(IsEmpty())
11989  {
11990  suballocations1st.clear();
11991  suballocations2nd.clear();
11992  m_1stNullItemsBeginCount = 0;
11993  m_1stNullItemsMiddleCount = 0;
11994  m_2ndNullItemsCount = 0;
11995  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11996  }
11997  else
11998  {
11999  const size_t suballoc1stCount = suballocations1st.size();
12000  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
12001  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
12002 
12003  // Find more null items at the beginning of 1st vector.
12004  while(m_1stNullItemsBeginCount < suballoc1stCount &&
12005  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
12006  {
12007  ++m_1stNullItemsBeginCount;
12008  --m_1stNullItemsMiddleCount;
12009  }
12010 
12011  // Find more null items at the end of 1st vector.
12012  while(m_1stNullItemsMiddleCount > 0 &&
12013  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
12014  {
12015  --m_1stNullItemsMiddleCount;
12016  suballocations1st.pop_back();
12017  }
12018 
12019  // Find more null items at the end of 2nd vector.
12020  while(m_2ndNullItemsCount > 0 &&
12021  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
12022  {
12023  --m_2ndNullItemsCount;
12024  suballocations2nd.pop_back();
12025  }
12026 
12027  // Find more null items at the beginning of 2nd vector.
12028  while(m_2ndNullItemsCount > 0 &&
12029  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
12030  {
12031  --m_2ndNullItemsCount;
12032  VmaVectorRemove(suballocations2nd, 0);
12033  }
12034 
12035  if(ShouldCompact1st())
12036  {
12037  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
12038  size_t srcIndex = m_1stNullItemsBeginCount;
12039  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
12040  {
12041  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
12042  {
12043  ++srcIndex;
12044  }
12045  if(dstIndex != srcIndex)
12046  {
12047  suballocations1st[dstIndex] = suballocations1st[srcIndex];
12048  }
12049  ++srcIndex;
12050  }
12051  suballocations1st.resize(nonNullItemCount);
12052  m_1stNullItemsBeginCount = 0;
12053  m_1stNullItemsMiddleCount = 0;
12054  }
12055 
12056  // 2nd vector became empty.
12057  if(suballocations2nd.empty())
12058  {
12059  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12060  }
12061 
12062  // 1st vector became empty.
12063  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
12064  {
12065  suballocations1st.clear();
12066  m_1stNullItemsBeginCount = 0;
12067 
12068  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
12069  {
12070  // Swap 1st with 2nd. Now 2nd is empty.
12071  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12072  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
12073  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
12074  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
12075  {
12076  ++m_1stNullItemsBeginCount;
12077  --m_1stNullItemsMiddleCount;
12078  }
12079  m_2ndNullItemsCount = 0;
12080  m_1stVectorIndex ^= 1;
12081  }
12082  }
12083  }
12084 
12085  VMA_HEAVY_ASSERT(Validate());
12086 }
12087 
12088 
12089 ////////////////////////////////////////////////////////////////////////////////
12090 // class VmaBlockMetadata_Buddy
12091 
12092 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
12093  VmaBlockMetadata(hAllocator),
12094  m_Root(VMA_NULL),
12095  m_AllocationCount(0),
12096  m_FreeCount(1),
12097  m_SumFreeSize(0)
12098 {
12099  memset(m_FreeList, 0, sizeof(m_FreeList));
12100 }
12101 
12102 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
12103 {
12104  DeleteNode(m_Root);
12105 }
12106 
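// Descriptive note: the buddy allocator can manage only a power-of-2 region, so the
// usable size is the block size rounded down to the previous power of 2 and the
// remainder is reported as unusable. Levels are added while the node size at the
// next level is still >= MIN_NODE_SIZE, up to MAX_LEVELS.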
12107 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
12108 {
12109  VmaBlockMetadata::Init(size);
12110 
12111  m_UsableSize = VmaPrevPow2(size);
12112  m_SumFreeSize = m_UsableSize;
12113 
12114  // Calculate m_LevelCount.
12115  m_LevelCount = 1;
12116  while(m_LevelCount < MAX_LEVELS &&
12117  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
12118  {
12119  ++m_LevelCount;
12120  }
12121 
12122  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
12123  rootNode->offset = 0;
12124  rootNode->type = Node::TYPE_FREE;
12125  rootNode->parent = VMA_NULL;
12126  rootNode->buddy = VMA_NULL;
12127 
12128  m_Root = rootNode;
12129  AddToFreeListFront(0, rootNode);
12130 }
12131 
12132 bool VmaBlockMetadata_Buddy::Validate() const
12133 {
12134  // Validate tree.
12135  ValidationContext ctx;
12136  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
12137  {
12138  VMA_VALIDATE(false && "ValidateNode failed.");
12139  }
12140  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
12141  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
12142 
12143  // Validate free node lists.
12144  for(uint32_t level = 0; level < m_LevelCount; ++level)
12145  {
12146  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
12147  m_FreeList[level].front->free.prev == VMA_NULL);
12148 
12149  for(Node* node = m_FreeList[level].front;
12150  node != VMA_NULL;
12151  node = node->free.next)
12152  {
12153  VMA_VALIDATE(node->type == Node::TYPE_FREE);
12154 
12155  if(node->free.next == VMA_NULL)
12156  {
12157  VMA_VALIDATE(m_FreeList[level].back == node);
12158  }
12159  else
12160  {
12161  VMA_VALIDATE(node->free.next->free.prev == node);
12162  }
12163  }
12164  }
12165 
12166  // Validate that free lists at higher levels are empty.
12167  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
12168  {
12169  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
12170  }
12171 
12172  return true;
12173 }
12174 
12175 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
12176 {
12177  for(uint32_t level = 0; level < m_LevelCount; ++level)
12178  {
12179  if(m_FreeList[level].front != VMA_NULL)
12180  {
12181  return LevelToNodeSize(level);
12182  }
12183  }
12184  return 0;
12185 }
12186 
12187 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
12188 {
12189  const VkDeviceSize unusableSize = GetUnusableSize();
12190 
12191  outInfo.blockCount = 1;
12192 
12193  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
12194  outInfo.usedBytes = outInfo.unusedBytes = 0;
12195 
12196  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
12197  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
12198  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
12199 
12200  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
12201 
12202  if(unusableSize > 0)
12203  {
12204  ++outInfo.unusedRangeCount;
12205  outInfo.unusedBytes += unusableSize;
12206  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
12207  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
12208  }
12209 }
12210 
12211 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
12212 {
12213  const VkDeviceSize unusableSize = GetUnusableSize();
12214 
12215  inoutStats.size += GetSize();
12216  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
12217  inoutStats.allocationCount += m_AllocationCount;
12218  inoutStats.unusedRangeCount += m_FreeCount;
12219  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
12220 
12221  if(unusableSize > 0)
12222  {
12223  ++inoutStats.unusedRangeCount;
12224  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
12225  }
12226 }
12227 
12228 #if VMA_STATS_STRING_ENABLED
12229 
12230 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
12231 {
12232  // TODO optimize
12233  VmaStatInfo stat;
12234  CalcAllocationStatInfo(stat);
12235 
12236  PrintDetailedMap_Begin(
12237  json,
12238  stat.unusedBytes,
12239  stat.allocationCount,
12240  stat.unusedRangeCount);
12241 
12242  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
12243 
12244  const VkDeviceSize unusableSize = GetUnusableSize();
12245  if(unusableSize > 0)
12246  {
12247  PrintDetailedMap_UnusedRange(json,
12248  m_UsableSize, // offset
12249  unusableSize); // size
12250  }
12251 
12252  PrintDetailedMap_End(json);
12253 }
12254 
12255 #endif // #if VMA_STATS_STRING_ENABLED
12256 
12257 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
12258  uint32_t currentFrameIndex,
12259  uint32_t frameInUseCount,
12260  VkDeviceSize bufferImageGranularity,
12261  VkDeviceSize allocSize,
12262  VkDeviceSize allocAlignment,
12263  bool upperAddress,
12264  VmaSuballocationType allocType,
12265  bool canMakeOtherLost,
12266  uint32_t strategy,
12267  VmaAllocationRequest* pAllocationRequest)
12268 {
12269  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
12270 
12271  // Simple way to respect bufferImageGranularity. May be optimized some day.
12272  // Whenever the allocation might contain an OPTIMAL image, round alignment and size up to the granularity...
12273  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
12274  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
12275  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
12276  {
12277  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
12278  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
12279  }
12280 
12281  if(allocSize > m_UsableSize)
12282  {
12283  return false;
12284  }
12285 
12286  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12287  for(uint32_t level = targetLevel + 1; level--; )
12288  {
12289  for(Node* freeNode = m_FreeList[level].front;
12290  freeNode != VMA_NULL;
12291  freeNode = freeNode->free.next)
12292  {
12293  if(freeNode->offset % allocAlignment == 0)
12294  {
12295  pAllocationRequest->type = VmaAllocationRequestType::Normal;
12296  pAllocationRequest->offset = freeNode->offset;
12297  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
12298  pAllocationRequest->sumItemSize = 0;
12299  pAllocationRequest->itemsToMakeLostCount = 0;
12300  pAllocationRequest->customData = (void*)(uintptr_t)level;
12301  return true;
12302  }
12303  }
12304  }
12305 
12306  return false;
12307 }
12308 
12309 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
12310  uint32_t currentFrameIndex,
12311  uint32_t frameInUseCount,
12312  VmaAllocationRequest* pAllocationRequest)
12313 {
12314  /*
12315  Lost allocations are not supported in the buddy allocator at the moment.
12316  Support might be added in the future.
12317  */
12318  return pAllocationRequest->itemsToMakeLostCount == 0;
12319 }
12320 
12321 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
12322 {
12323  /*
12324  Lost allocations are not supported in the buddy allocator at the moment.
12325  Support might be added in the future.
12326  */
12327  return 0;
12328 }
12329 
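// Descriptive note: takes the free node found by CreateAllocationRequest (its level
// is passed through request.customData), then splits free nodes in half repeatedly
// until the target level is reached, and finally converts the resulting node into an
// allocation node.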
12330 void VmaBlockMetadata_Buddy::Alloc(
12331  const VmaAllocationRequest& request,
12332  VmaSuballocationType type,
12333  VkDeviceSize allocSize,
12334  VmaAllocation hAllocation)
12335 {
12336  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
12337 
12338  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12339  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
12340 
12341  Node* currNode = m_FreeList[currLevel].front;
12342  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12343  while(currNode->offset != request.offset)
12344  {
12345  currNode = currNode->free.next;
12346  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12347  }
12348 
12349  // Go down, splitting free nodes.
12350  while(currLevel < targetLevel)
12351  {
12352  // currNode is already first free node at currLevel.
12353  // Remove it from list of free nodes at this currLevel.
12354  RemoveFromFreeList(currLevel, currNode);
12355 
12356  const uint32_t childrenLevel = currLevel + 1;
12357 
12358  // Create two free sub-nodes.
12359  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
12360  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
12361 
12362  leftChild->offset = currNode->offset;
12363  leftChild->type = Node::TYPE_FREE;
12364  leftChild->parent = currNode;
12365  leftChild->buddy = rightChild;
12366 
12367  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
12368  rightChild->type = Node::TYPE_FREE;
12369  rightChild->parent = currNode;
12370  rightChild->buddy = leftChild;
12371 
12372  // Convert current currNode to split type.
12373  currNode->type = Node::TYPE_SPLIT;
12374  currNode->split.leftChild = leftChild;
12375 
12376  // Add child nodes to free list. Order is important!
12377  AddToFreeListFront(childrenLevel, rightChild);
12378  AddToFreeListFront(childrenLevel, leftChild);
12379 
12380  ++m_FreeCount;
12381  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
12382  ++currLevel;
12383  currNode = m_FreeList[currLevel].front;
12384 
12385  /*
12386  We can be sure that currNode, as the left child of the node previously split,
12387  also fulfills the alignment requirement.
12388  */
12389  }
12390 
12391  // Remove from free list.
12392  VMA_ASSERT(currLevel == targetLevel &&
12393  currNode != VMA_NULL &&
12394  currNode->type == Node::TYPE_FREE);
12395  RemoveFromFreeList(currLevel, currNode);
12396 
12397  // Convert to allocation node.
12398  currNode->type = Node::TYPE_ALLOCATION;
12399  currNode->allocation.alloc = hAllocation;
12400 
12401  ++m_AllocationCount;
12402  --m_FreeCount;
12403  m_SumFreeSize -= allocSize;
12404 }
12405 
12406 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
12407 {
12408  if(node->type == Node::TYPE_SPLIT)
12409  {
12410  DeleteNode(node->split.leftChild->buddy);
12411  DeleteNode(node->split.leftChild);
12412  }
12413 
12414  vma_delete(GetAllocationCallbacks(), node);
12415 }
12416 
12417 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
12418 {
12419  VMA_VALIDATE(level < m_LevelCount);
12420  VMA_VALIDATE(curr->parent == parent);
12421  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
12422  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
12423  switch(curr->type)
12424  {
12425  case Node::TYPE_FREE:
12426  // curr->free.prev, next are validated separately.
12427  ctx.calculatedSumFreeSize += levelNodeSize;
12428  ++ctx.calculatedFreeCount;
12429  break;
12430  case Node::TYPE_ALLOCATION:
12431  ++ctx.calculatedAllocationCount;
12432  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
12433  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
12434  break;
12435  case Node::TYPE_SPLIT:
12436  {
12437  const uint32_t childrenLevel = level + 1;
12438  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
12439  const Node* const leftChild = curr->split.leftChild;
12440  VMA_VALIDATE(leftChild != VMA_NULL);
12441  VMA_VALIDATE(leftChild->offset == curr->offset);
12442  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
12443  {
12444  VMA_VALIDATE(false && "ValidateNode for left child failed.");
12445  }
12446  const Node* const rightChild = leftChild->buddy;
12447  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
12448  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
12449  {
12450  VMA_VALIDATE(false && "ValidateNode for right child failed.");
12451  }
12452  }
12453  break;
12454  default:
12455  return false;
12456  }
12457 
12458  return true;
12459 }
12460 
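// Descriptive note: maps an allocation size to the deepest tree level whose node
// size still fits it. Level 0 spans the whole usable block; each deeper level halves
// the node size.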
12461 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
12462 {
12463  // I know this could be optimized somehow, e.g. by using std::bit_width() from C++20.
12464  uint32_t level = 0;
12465  VkDeviceSize currLevelNodeSize = m_UsableSize;
12466  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
12467  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
12468  {
12469  ++level;
12470  currLevelNodeSize = nextLevelNodeSize;
12471  nextLevelNodeSize = currLevelNodeSize >> 1;
12472  }
12473  return level;
12474 }
12475 
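// Descriptive note: descends from the root, choosing the left or right child based
// on the requested offset, until it reaches the allocation node. After freeing it,
// adjacent free buddies are merged back into their parent for as long as possible.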
12476 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
12477 {
12478  // Find node and level.
12479  Node* node = m_Root;
12480  VkDeviceSize nodeOffset = 0;
12481  uint32_t level = 0;
12482  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
12483  while(node->type == Node::TYPE_SPLIT)
12484  {
12485  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
12486  if(offset < nodeOffset + nextLevelSize)
12487  {
12488  node = node->split.leftChild;
12489  }
12490  else
12491  {
12492  node = node->split.leftChild->buddy;
12493  nodeOffset += nextLevelSize;
12494  }
12495  ++level;
12496  levelNodeSize = nextLevelSize;
12497  }
12498 
12499  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
12500  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
12501 
12502  ++m_FreeCount;
12503  --m_AllocationCount;
12504  m_SumFreeSize += alloc->GetSize();
12505 
12506  node->type = Node::TYPE_FREE;
12507 
12508  // Join free nodes if possible.
12509  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
12510  {
12511  RemoveFromFreeList(level, node->buddy);
12512  Node* const parent = node->parent;
12513 
12514  vma_delete(GetAllocationCallbacks(), node->buddy);
12515  vma_delete(GetAllocationCallbacks(), node);
12516  parent->type = Node::TYPE_FREE;
12517 
12518  node = parent;
12519  --level;
12520  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
12521  --m_FreeCount;
12522  }
12523 
12524  AddToFreeListFront(level, node);
12525 }
12526 
12527 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
12528 {
12529  switch(node->type)
12530  {
12531  case Node::TYPE_FREE:
12532  ++outInfo.unusedRangeCount;
12533  outInfo.unusedBytes += levelNodeSize;
12534  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
12535  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
12536  break;
12537  case Node::TYPE_ALLOCATION:
12538  {
12539  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12540  ++outInfo.allocationCount;
12541  outInfo.usedBytes += allocSize;
12542  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
12543  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
12544 
12545  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
12546  if(unusedRangeSize > 0)
12547  {
12548  ++outInfo.unusedRangeCount;
12549  outInfo.unusedBytes += unusedRangeSize;
12550  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
12551  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
12552  }
12553  }
12554  break;
12555  case Node::TYPE_SPLIT:
12556  {
12557  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12558  const Node* const leftChild = node->split.leftChild;
12559  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12560  const Node* const rightChild = leftChild->buddy;
12561  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12562  }
12563  break;
12564  default:
12565  VMA_ASSERT(0);
12566  }
12567 }
12568 
12569 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12570 {
12571  VMA_ASSERT(node->type == Node::TYPE_FREE);
12572 
12573  // List is empty.
12574  Node* const frontNode = m_FreeList[level].front;
12575  if(frontNode == VMA_NULL)
12576  {
12577  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12578  node->free.prev = node->free.next = VMA_NULL;
12579  m_FreeList[level].front = m_FreeList[level].back = node;
12580  }
12581  else
12582  {
12583  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12584  node->free.prev = VMA_NULL;
12585  node->free.next = frontNode;
12586  frontNode->free.prev = node;
12587  m_FreeList[level].front = node;
12588  }
12589 }
12590 
12591 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12592 {
12593  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12594 
12595  // It is at the front.
12596  if(node->free.prev == VMA_NULL)
12597  {
12598  VMA_ASSERT(m_FreeList[level].front == node);
12599  m_FreeList[level].front = node->free.next;
12600  }
12601  else
12602  {
12603  Node* const prevFreeNode = node->free.prev;
12604  VMA_ASSERT(prevFreeNode->free.next == node);
12605  prevFreeNode->free.next = node->free.next;
12606  }
12607 
12608  // It is at the back.
12609  if(node->free.next == VMA_NULL)
12610  {
12611  VMA_ASSERT(m_FreeList[level].back == node);
12612  m_FreeList[level].back = node->free.prev;
12613  }
12614  else
12615  {
12616  Node* const nextFreeNode = node->free.next;
12617  VMA_ASSERT(nextFreeNode->free.prev == node);
12618  nextFreeNode->free.prev = node->free.prev;
12619  }
12620 }
12621 
12622 #if VMA_STATS_STRING_ENABLED
12623 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12624 {
12625  switch(node->type)
12626  {
12627  case Node::TYPE_FREE:
12628  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12629  break;
12630  case Node::TYPE_ALLOCATION:
12631  {
12632  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12633  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12634  if(allocSize < levelNodeSize)
12635  {
12636  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12637  }
12638  }
12639  break;
12640  case Node::TYPE_SPLIT:
12641  {
12642  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12643  const Node* const leftChild = node->split.leftChild;
12644  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12645  const Node* const rightChild = leftChild->buddy;
12646  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12647  }
12648  break;
12649  default:
12650  VMA_ASSERT(0);
12651  }
12652 }
12653 #endif // #if VMA_STATS_STRING_ENABLED
12654 
12655 
12656 ////////////////////////////////////////////////////////////////////////////////
12657 // class VmaDeviceMemoryBlock
12658 
12659 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12660  m_pMetadata(VMA_NULL),
12661  m_MemoryTypeIndex(UINT32_MAX),
12662  m_Id(0),
12663  m_hMemory(VK_NULL_HANDLE),
12664  m_MapCount(0),
12665  m_pMappedData(VMA_NULL)
12666 {
12667 }
12668 
12669 void VmaDeviceMemoryBlock::Init(
12670  VmaAllocator hAllocator,
12671  VmaPool hParentPool,
12672  uint32_t newMemoryTypeIndex,
12673  VkDeviceMemory newMemory,
12674  VkDeviceSize newSize,
12675  uint32_t id,
12676  uint32_t algorithm)
12677 {
12678  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12679 
12680  m_hParentPool = hParentPool;
12681  m_MemoryTypeIndex = newMemoryTypeIndex;
12682  m_Id = id;
12683  m_hMemory = newMemory;
12684 
12685  switch(algorithm)
12686  {
12687  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
12688  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
12689  break;
12690  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
12691  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
12692  break;
12693  default:
12694  VMA_ASSERT(0);
12695  // Fall-through.
12696  case 0:
12697  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12698  }
12699  m_pMetadata->Init(newSize);
12700 }
12701 
12702 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12703 {
12704  // This is the most important assert in the entire library.
12705  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12706  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12707 
12708  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12709  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12710  m_hMemory = VK_NULL_HANDLE;
12711 
12712  vma_delete(allocator, m_pMetadata);
12713  m_pMetadata = VMA_NULL;
12714 }
12715 
12716 bool VmaDeviceMemoryBlock::Validate() const
12717 {
12718  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12719  (m_pMetadata->GetSize() != 0));
12720 
12721  return m_pMetadata->Validate();
12722 }
12723 
12724 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12725 {
12726  void* pData = nullptr;
12727  VkResult res = Map(hAllocator, 1, &pData);
12728  if(res != VK_SUCCESS)
12729  {
12730  return res;
12731  }
12732 
12733  res = m_pMetadata->CheckCorruption(pData);
12734 
12735  Unmap(hAllocator, 1);
12736 
12737  return res;
12738 }
12739 
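// Descriptive note: mapping is reference-counted per block. vkMapMemory is called
// only on the transition from an unmapped state, always mapping the whole block;
// subsequent calls just bump m_MapCount and return the cached pointer.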
12740 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12741 {
12742  if(count == 0)
12743  {
12744  return VK_SUCCESS;
12745  }
12746 
12747  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12748  if(m_MapCount != 0)
12749  {
12750  m_MapCount += count;
12751  VMA_ASSERT(m_pMappedData != VMA_NULL);
12752  if(ppData != VMA_NULL)
12753  {
12754  *ppData = m_pMappedData;
12755  }
12756  return VK_SUCCESS;
12757  }
12758  else
12759  {
12760  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12761  hAllocator->m_hDevice,
12762  m_hMemory,
12763  0, // offset
12764  VK_WHOLE_SIZE,
12765  0, // flags
12766  &m_pMappedData);
12767  if(result == VK_SUCCESS)
12768  {
12769  if(ppData != VMA_NULL)
12770  {
12771  *ppData = m_pMappedData;
12772  }
12773  m_MapCount = count;
12774  }
12775  return result;
12776  }
12777 }
12778 
12779 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12780 {
12781  if(count == 0)
12782  {
12783  return;
12784  }
12785 
12786  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12787  if(m_MapCount >= count)
12788  {
12789  m_MapCount -= count;
12790  if(m_MapCount == 0)
12791  {
12792  m_pMappedData = VMA_NULL;
12793  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12794  }
12795  }
12796  else
12797  {
12798  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12799  }
12800 }
12801 
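// Descriptive note: with VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION enabled,
// a magic value is written into the margin just before and just after the
// allocation; ValidateMagicValueAroundAllocation below checks it on free to detect
// out-of-bounds writes.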
12802 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12803 {
12804  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12805  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12806 
12807  void* pData;
12808  VkResult res = Map(hAllocator, 1, &pData);
12809  if(res != VK_SUCCESS)
12810  {
12811  return res;
12812  }
12813 
12814  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12815  VmaWriteMagicValue(pData, allocOffset + allocSize);
12816 
12817  Unmap(hAllocator, 1);
12818 
12819  return VK_SUCCESS;
12820 }
12821 
12822 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12823 {
12824  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12825  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12826 
12827  void* pData;
12828  VkResult res = Map(hAllocator, 1, &pData);
12829  if(res != VK_SUCCESS)
12830  {
12831  return res;
12832  }
12833 
12834  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12835  {
12836  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12837  }
12838  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12839  {
12840  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12841  }
12842 
12843  Unmap(hAllocator, 1);
12844 
12845  return VK_SUCCESS;
12846 }
12847 
12848 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12849  const VmaAllocator hAllocator,
12850  const VmaAllocation hAllocation,
12851  VkDeviceSize allocationLocalOffset,
12852  VkBuffer hBuffer,
12853  const void* pNext)
12854 {
12855  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12856  hAllocation->GetBlock() == this);
12857  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12858  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12859  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12860  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12861  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12862  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12863 }
12864 
12865 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12866  const VmaAllocator hAllocator,
12867  const VmaAllocation hAllocation,
12868  VkDeviceSize allocationLocalOffset,
12869  VkImage hImage,
12870  const void* pNext)
12871 {
12872  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12873  hAllocation->GetBlock() == this);
12874  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12875  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12876  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12877  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12878  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12879  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12880 }
12881 
12882 static void InitStatInfo(VmaStatInfo& outInfo)
12883 {
12884  memset(&outInfo, 0, sizeof(outInfo));
12885  outInfo.allocationSizeMin = UINT64_MAX;
12886  outInfo.unusedRangeSizeMin = UINT64_MAX;
12887 }
12888 
12889 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12890 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12891 {
12892  inoutInfo.blockCount += srcInfo.blockCount;
12893  inoutInfo.allocationCount += srcInfo.allocationCount;
12894  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12895  inoutInfo.usedBytes += srcInfo.usedBytes;
12896  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12897  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12898  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12899  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12900  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12901 }
12902 
12903 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12904 {
12905  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12906  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12907  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12908  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12909 }
12910 
12911 VmaPool_T::VmaPool_T(
12912  VmaAllocator hAllocator,
12913  const VmaPoolCreateInfo& createInfo,
12914  VkDeviceSize preferredBlockSize) :
12915  m_BlockVector(
12916  hAllocator,
12917  this, // hParentPool
12918  createInfo.memoryTypeIndex,
12919  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12920  createInfo.minBlockCount,
12921  createInfo.maxBlockCount,
12922  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12923  createInfo.frameInUseCount,
12924  createInfo.blockSize != 0, // explicitBlockSize
12925  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
12926  createInfo.priority,
12927  VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
12928  createInfo.pMemoryAllocateNext),
12929  m_Id(0),
12930  m_Name(VMA_NULL)
12931 {
12932 }
12933 
12934 VmaPool_T::~VmaPool_T()
12935 {
12936  VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
12937 }
12938 
12939 void VmaPool_T::SetName(const char* pName)
12940 {
12941  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12942  VmaFreeString(allocs, m_Name);
12943 
12944  if(pName != VMA_NULL)
12945  {
12946  m_Name = VmaCreateStringCopy(allocs, pName);
12947  }
12948  else
12949  {
12950  m_Name = VMA_NULL;
12951  }
12952 }
12953 
12954 #if VMA_STATS_STRING_ENABLED
12955 
12956 #endif // #if VMA_STATS_STRING_ENABLED
12957 
12958 VmaBlockVector::VmaBlockVector(
12959  VmaAllocator hAllocator,
12960  VmaPool hParentPool,
12961  uint32_t memoryTypeIndex,
12962  VkDeviceSize preferredBlockSize,
12963  size_t minBlockCount,
12964  size_t maxBlockCount,
12965  VkDeviceSize bufferImageGranularity,
12966  uint32_t frameInUseCount,
12967  bool explicitBlockSize,
12968  uint32_t algorithm,
12969  float priority,
12970  VkDeviceSize minAllocationAlignment,
12971  void* pMemoryAllocateNext) :
12972  m_hAllocator(hAllocator),
12973  m_hParentPool(hParentPool),
12974  m_MemoryTypeIndex(memoryTypeIndex),
12975  m_PreferredBlockSize(preferredBlockSize),
12976  m_MinBlockCount(minBlockCount),
12977  m_MaxBlockCount(maxBlockCount),
12978  m_BufferImageGranularity(bufferImageGranularity),
12979  m_FrameInUseCount(frameInUseCount),
12980  m_ExplicitBlockSize(explicitBlockSize),
12981  m_Algorithm(algorithm),
12982  m_Priority(priority),
12983  m_MinAllocationAlignment(minAllocationAlignment),
12984  m_pMemoryAllocateNext(pMemoryAllocateNext),
12985  m_HasEmptyBlock(false),
12986  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12987  m_NextBlockId(0)
12988 {
12989 }
12990 
12991 VmaBlockVector::~VmaBlockVector()
12992 {
12993  for(size_t i = m_Blocks.size(); i--; )
12994  {
12995  m_Blocks[i]->Destroy(m_hAllocator);
12996  vma_delete(m_hAllocator, m_Blocks[i]);
12997  }
12998 }
12999 
13000 VkResult VmaBlockVector::CreateMinBlocks()
13001 {
13002  for(size_t i = 0; i < m_MinBlockCount; ++i)
13003  {
13004  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
13005  if(res != VK_SUCCESS)
13006  {
13007  return res;
13008  }
13009  }
13010  return VK_SUCCESS;
13011 }
13012 
13013 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
13014 {
13015  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13016 
13017  const size_t blockCount = m_Blocks.size();
13018 
13019  pStats->size = 0;
13020  pStats->unusedSize = 0;
13021  pStats->allocationCount = 0;
13022  pStats->unusedRangeCount = 0;
13023  pStats->unusedRangeSizeMax = 0;
13024  pStats->blockCount = blockCount;
13025 
13026  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13027  {
13028  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13029  VMA_ASSERT(pBlock);
13030  VMA_HEAVY_ASSERT(pBlock->Validate());
13031  pBlock->m_pMetadata->AddPoolStats(*pStats);
13032  }
13033 }
13034 
13035 bool VmaBlockVector::IsEmpty()
13036 {
13037  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13038  return m_Blocks.empty();
13039 }
13040 
13041 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
13042 {
13043  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13044  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
13045  (VMA_DEBUG_MARGIN > 0) &&
13046  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
13047  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
13048 }
13049 
13050 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
13051 
13052 VkResult VmaBlockVector::Allocate(
13053  uint32_t currentFrameIndex,
13054  VkDeviceSize size,
13055  VkDeviceSize alignment,
13056  const VmaAllocationCreateInfo& createInfo,
13057  VmaSuballocationType suballocType,
13058  size_t allocationCount,
13059  VmaAllocation* pAllocations)
13060 {
13061  size_t allocIndex;
13062  VkResult res = VK_SUCCESS;
13063 
13064  alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
13065 
13066  if(IsCorruptionDetectionEnabled())
13067  {
13068  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
13069  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
13070  }
13071 
13072  {
13073  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13074  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13075  {
13076  res = AllocatePage(
13077  currentFrameIndex,
13078  size,
13079  alignment,
13080  createInfo,
13081  suballocType,
13082  pAllocations + allocIndex);
13083  if(res != VK_SUCCESS)
13084  {
13085  break;
13086  }
13087  }
13088  }
13089 
13090  if(res != VK_SUCCESS)
13091  {
13092  // Free all already created allocations.
13093  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13094  while(allocIndex--)
13095  {
13096  VmaAllocation_T* const alloc = pAllocations[allocIndex];
13097  const VkDeviceSize allocSize = alloc->GetSize();
13098  Free(alloc);
13099  m_hAllocator->m_Budget.RemoveAllocation(heapIndex, allocSize);
13100  }
13101  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
13102  }
13103 
13104  return res;
13105 }
13106 
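// Descriptive note: single-allocation workhorse. The order of attempts is:
// 1. allocate from an existing block (last block only for the linear algorithm,
//    best-fit forward / worst- or first-fit backward otherwise),
// 2. create a new block, progressively halving its size on failure,
// 3. if allowed, make other allocations lost and retry, up to
//    VMA_ALLOCATION_TRY_COUNT times.
// New blocks respect the heap budget unless a dedicated allocation can serve as a fallback.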
13107 VkResult VmaBlockVector::AllocatePage(
13108  uint32_t currentFrameIndex,
13109  VkDeviceSize size,
13110  VkDeviceSize alignment,
13111  const VmaAllocationCreateInfo& createInfo,
13112  VmaSuballocationType suballocType,
13113  VmaAllocation* pAllocation)
13114 {
13115  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13116  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
13117  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13118  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13119 
13120  VkDeviceSize freeMemory;
13121  {
13122  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13123  VmaBudget heapBudget = {};
13124  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13125  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
13126  }
13127 
13128  const bool canFallbackToDedicated = !IsCustomPool();
13129  const bool canCreateNewBlock =
13130  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
13131  (m_Blocks.size() < m_MaxBlockCount) &&
13132  (freeMemory >= size || !canFallbackToDedicated);
13133  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
13134 
13135  // If the linear algorithm is used, canMakeOtherLost is available only when the block is used as a ring buffer,
13136  // which in turn is possible only when maxBlockCount = 1.
13137  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
13138  {
13139  canMakeOtherLost = false;
13140  }
13141 
13142  // Upper address can only be used with linear allocator and within single memory block.
13143  if(isUpperAddress &&
13144  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
13145  {
13146  return VK_ERROR_FEATURE_NOT_PRESENT;
13147  }
13148 
13149  // Validate strategy.
13150  switch(strategy)
13151  {
13152  case 0:
13153  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
13154  break;
13155  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
13156  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
13157  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
13158  break;
13159  default:
13160  return VK_ERROR_FEATURE_NOT_PRESENT;
13161  }
13162 
13163  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
13164  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
13165  {
13166  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13167  }
13168 
13169  /*
13170  Under certain conditions, this whole section can be skipped as an optimization, so
13171  we move on directly to trying to allocate with canMakeOtherLost. That is the case
13172  e.g. for custom pools with the linear algorithm.
13173  */
13174  if(!canMakeOtherLost || canCreateNewBlock)
13175  {
13176  // 1. Search existing allocations. Try to allocate without making other allocations lost.
13177  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
13178  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
13179 
13180  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13181  {
13182  // Use only last block.
13183  if(!m_Blocks.empty())
13184  {
13185  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
13186  VMA_ASSERT(pCurrBlock);
13187  VkResult res = AllocateFromBlock(
13188  pCurrBlock,
13189  currentFrameIndex,
13190  size,
13191  alignment,
13192  allocFlagsCopy,
13193  createInfo.pUserData,
13194  suballocType,
13195  strategy,
13196  pAllocation);
13197  if(res == VK_SUCCESS)
13198  {
13199  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
13200  return VK_SUCCESS;
13201  }
13202  }
13203  }
13204  else
13205  {
13206  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13207  {
13208  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13209  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13210  {
13211  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13212  VMA_ASSERT(pCurrBlock);
13213  VkResult res = AllocateFromBlock(
13214  pCurrBlock,
13215  currentFrameIndex,
13216  size,
13217  alignment,
13218  allocFlagsCopy,
13219  createInfo.pUserData,
13220  suballocType,
13221  strategy,
13222  pAllocation);
13223  if(res == VK_SUCCESS)
13224  {
13225  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
13226  return VK_SUCCESS;
13227  }
13228  }
13229  }
13230  else // WORST_FIT, FIRST_FIT
13231  {
13232  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13233  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13234  {
13235  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13236  VMA_ASSERT(pCurrBlock);
13237  VkResult res = AllocateFromBlock(
13238  pCurrBlock,
13239  currentFrameIndex,
13240  size,
13241  alignment,
13242  allocFlagsCopy,
13243  createInfo.pUserData,
13244  suballocType,
13245  strategy,
13246  pAllocation);
13247  if(res == VK_SUCCESS)
13248  {
13249  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
13250  return VK_SUCCESS;
13251  }
13252  }
13253  }
13254  }
13255 
13256  // 2. Try to create new block.
13257  if(canCreateNewBlock)
13258  {
13259  // Calculate optimal size for new block.
13260  VkDeviceSize newBlockSize = m_PreferredBlockSize;
13261  uint32_t newBlockSizeShift = 0;
13262  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
13263 
13264  if(!m_ExplicitBlockSize)
13265  {
13266  // Allocate 1/8, 1/4, 1/2 as first blocks.
13267  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
13268  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
13269  {
13270  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
13271  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
13272  {
13273  newBlockSize = smallerNewBlockSize;
13274  ++newBlockSizeShift;
13275  }
13276  else
13277  {
13278  break;
13279  }
13280  }
13281  }
13282 
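// For illustration: with m_PreferredBlockSize = 256 MiB, no existing blocks, and a
// small first allocation, the loop above halves three times and the first block is
// created with 256 / 8 = 32 MiB; later blocks grow back toward the preferred size.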
13283  size_t newBlockIndex = 0;
13284  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
13285  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
13286  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
13287  if(!m_ExplicitBlockSize)
13288  {
13289  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
13290  {
13291  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
13292  if(smallerNewBlockSize >= size)
13293  {
13294  newBlockSize = smallerNewBlockSize;
13295  ++newBlockSizeShift;
13296  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
13297  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
13298  }
13299  else
13300  {
13301  break;
13302  }
13303  }
13304  }
13305 
13306  if(res == VK_SUCCESS)
13307  {
13308  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
13309  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
13310 
13311  res = AllocateFromBlock(
13312  pBlock,
13313  currentFrameIndex,
13314  size,
13315  alignment,
13316  allocFlagsCopy,
13317  createInfo.pUserData,
13318  suballocType,
13319  strategy,
13320  pAllocation);
13321  if(res == VK_SUCCESS)
13322  {
13323  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
13324  return VK_SUCCESS;
13325  }
13326  else
13327  {
13328  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
13329  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13330  }
13331  }
13332  }
13333  }
13334 
13335  // 3. Try to allocate from existing blocks with making other allocations lost.
13336  if(canMakeOtherLost)
13337  {
13338  uint32_t tryIndex = 0;
13339  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
13340  {
13341  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
13342  VmaAllocationRequest bestRequest = {};
13343  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
13344 
13345  // 1. Search existing allocations.
13346  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13347  {
13348  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13349  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13350  {
13351  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13352  VMA_ASSERT(pCurrBlock);
13353  VmaAllocationRequest currRequest = {};
13354  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13355  currentFrameIndex,
13356  m_FrameInUseCount,
13357  m_BufferImageGranularity,
13358  size,
13359  alignment,
13360  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13361  suballocType,
13362  canMakeOtherLost,
13363  strategy,
13364  &currRequest))
13365  {
13366  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13367  if(pBestRequestBlock == VMA_NULL ||
13368  currRequestCost < bestRequestCost)
13369  {
13370  pBestRequestBlock = pCurrBlock;
13371  bestRequest = currRequest;
13372  bestRequestCost = currRequestCost;
13373 
13374  if(bestRequestCost == 0)
13375  {
13376  break;
13377  }
13378  }
13379  }
13380  }
13381  }
13382  else // WORST_FIT, FIRST_FIT
13383  {
13384  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13385  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13386  {
13387  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13388  VMA_ASSERT(pCurrBlock);
13389  VmaAllocationRequest currRequest = {};
13390  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13391  currentFrameIndex,
13392  m_FrameInUseCount,
13393  m_BufferImageGranularity,
13394  size,
13395  alignment,
13396  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13397  suballocType,
13398  canMakeOtherLost,
13399  strategy,
13400  &currRequest))
13401  {
13402  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13403  if(pBestRequestBlock == VMA_NULL ||
13404  currRequestCost < bestRequestCost ||
13405  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13406  {
13407  pBestRequestBlock = pCurrBlock;
13408  bestRequest = currRequest;
13409  bestRequestCost = currRequestCost;
13410 
13411  if(bestRequestCost == 0 ||
13412  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13413  {
13414  break;
13415  }
13416  }
13417  }
13418  }
13419  }
13420 
13421  if(pBestRequestBlock != VMA_NULL)
13422  {
13423  if(mapped)
13424  {
13425  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
13426  if(res != VK_SUCCESS)
13427  {
13428  return res;
13429  }
13430  }
13431 
13432  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
13433  currentFrameIndex,
13434  m_FrameInUseCount,
13435  &bestRequest))
13436  {
13437  // Allocate from this pBlock.
13438  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13439  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
13440  UpdateHasEmptyBlock();
13441  (*pAllocation)->InitBlockAllocation(
13442  pBestRequestBlock,
13443  bestRequest.offset,
13444  alignment,
13445  size,
13446  m_MemoryTypeIndex,
13447  suballocType,
13448  mapped,
13449  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13450  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
13451  VMA_DEBUG_LOG(" Returned from existing block");
13452  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
13453  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13454  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13455  {
13456  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13457  }
13458  if(IsCorruptionDetectionEnabled())
13459  {
13460  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
13461  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13462  }
13463  return VK_SUCCESS;
13464  }
13465  // else: Some allocations must have been touched while we were here. Next try.
13466  }
13467  else
13468  {
13469  // Could not find place in any of the blocks - break outer loop.
13470  break;
13471  }
13472  }
13473  /* Maximum number of tries exceeded - a very unlikely event that happens when many other
13474  threads are simultaneously touching allocations, making it impossible to mark them
13475  as lost at the same time as we try to allocate. */
13476  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
13477  {
13478  return VK_ERROR_TOO_MANY_OBJECTS;
13479  }
13480  }
13481 
13482  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13483 }
13484 
13485 void VmaBlockVector::Free(
13486  const VmaAllocation hAllocation)
13487 {
13488  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
13489 
13490  bool budgetExceeded = false;
13491  {
13492  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13493  VmaBudget heapBudget = {};
13494  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13495  budgetExceeded = heapBudget.usage >= heapBudget.budget;
13496  }
13497 
13498  // Scope for lock.
13499  {
13500  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13501 
13502  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13503 
13504  if(IsCorruptionDetectionEnabled())
13505  {
13506  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
13507  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
13508  }
13509 
13510  if(hAllocation->IsPersistentMap())
13511  {
13512  pBlock->Unmap(m_hAllocator, 1);
13513  }
13514 
13515  pBlock->m_pMetadata->Free(hAllocation);
13516  VMA_HEAVY_ASSERT(pBlock->Validate());
13517 
13518  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
13519 
13520  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
13521  // pBlock became empty after this deallocation.
13522  if(pBlock->m_pMetadata->IsEmpty())
13523  {
13524  // We already have an empty block. We don't want two, so delete this one.
13525  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
13526  {
13527  pBlockToDelete = pBlock;
13528  Remove(pBlock);
13529  }
13530  // else: We now have an empty block - leave it.
13531  }
13532  // pBlock didn't become empty, but we have another empty block - find and free that one.
13533  // (This is optional, heuristics.)
13534  else if(m_HasEmptyBlock && canDeleteBlock)
13535  {
13536  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
13537  if(pLastBlock->m_pMetadata->IsEmpty())
13538  {
13539  pBlockToDelete = pLastBlock;
13540  m_Blocks.pop_back();
13541  }
13542  }
13543 
13544  UpdateHasEmptyBlock();
13545  IncrementallySortBlocks();
13546  }
13547 
13548  // Destruction of a free block. Deferred until this point, outside of mutex
13549  // lock, for performance reasons.
13550  if(pBlockToDelete != VMA_NULL)
13551  {
13552  VMA_DEBUG_LOG(" Deleted empty block");
13553  pBlockToDelete->Destroy(m_hAllocator);
13554  vma_delete(m_hAllocator, pBlockToDelete);
13555  }
13556 }
13557 
13558 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
13559 {
13560  VkDeviceSize result = 0;
13561  for(size_t i = m_Blocks.size(); i--; )
13562  {
13563  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
13564  if(result >= m_PreferredBlockSize)
13565  {
13566  break;
13567  }
13568  }
13569  return result;
13570 }
13571 
13572 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13573 {
13574  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13575  {
13576  if(m_Blocks[blockIndex] == pBlock)
13577  {
13578  VmaVectorRemove(m_Blocks, blockIndex);
13579  return;
13580  }
13581  }
13582  VMA_ASSERT(0);
13583 }
13584 
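// Descriptive note: performs at most one adjacent swap (a single bubble-sort step)
// per call, which keeps m_Blocks approximately sorted by ascending free space at a
// cost amortized over many allocations and frees.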
13585 void VmaBlockVector::IncrementallySortBlocks()
13586 {
13587  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13588  {
13589  // Bubble sort only until first swap.
13590  for(size_t i = 1; i < m_Blocks.size(); ++i)
13591  {
13592  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13593  {
13594  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13595  return;
13596  }
13597  }
13598  }
13599 }
13600 
13601 VkResult VmaBlockVector::AllocateFromBlock(
13602  VmaDeviceMemoryBlock* pBlock,
13603  uint32_t currentFrameIndex,
13604  VkDeviceSize size,
13605  VkDeviceSize alignment,
13606  VmaAllocationCreateFlags allocFlags,
13607  void* pUserData,
13608  VmaSuballocationType suballocType,
13609  uint32_t strategy,
13610  VmaAllocation* pAllocation)
13611 {
13612  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13613  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13614  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13615  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13616 
13617  VmaAllocationRequest currRequest = {};
13618  if(pBlock->m_pMetadata->CreateAllocationRequest(
13619  currentFrameIndex,
13620  m_FrameInUseCount,
13621  m_BufferImageGranularity,
13622  size,
13623  alignment,
13624  isUpperAddress,
13625  suballocType,
13626  false, // canMakeOtherLost
13627  strategy,
13628  &currRequest))
13629  {
13630  // Allocate from pCurrBlock.
13631  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13632 
13633  if(mapped)
13634  {
13635  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13636  if(res != VK_SUCCESS)
13637  {
13638  return res;
13639  }
13640  }
13641 
13642  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13643  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13644  UpdateHasEmptyBlock();
13645  (*pAllocation)->InitBlockAllocation(
13646  pBlock,
13647  currRequest.offset,
13648  alignment,
13649  size,
13650  m_MemoryTypeIndex,
13651  suballocType,
13652  mapped,
13653  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13654  VMA_HEAVY_ASSERT(pBlock->Validate());
13655  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13656  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13657  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13658  {
13659  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13660  }
13661  if(IsCorruptionDetectionEnabled())
13662  {
13663  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13664  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13665  }
13666  return VK_SUCCESS;
13667  }
13668  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13669 }
13670 
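// Descriptive note: allocates a fresh VkDeviceMemory of the given size and wraps it
// in a VmaDeviceMemoryBlock. Optional pNext structures (device address flags, memory
// priority, export info) are chained onto VkMemoryAllocateInfo depending on
// compile-time switches and allocator settings.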
13671 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13672 {
13673  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13674  allocInfo.pNext = m_pMemoryAllocateNext;
13675  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13676  allocInfo.allocationSize = blockSize;
13677 
13678 #if VMA_BUFFER_DEVICE_ADDRESS
13679  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13680  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13681  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13682  {
13683  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13684  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13685  }
13686 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13687 
13688 #if VMA_MEMORY_PRIORITY
13689  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
13690  if(m_hAllocator->m_UseExtMemoryPriority)
13691  {
13692  priorityInfo.priority = m_Priority;
13693  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
13694  }
13695 #endif // #if VMA_MEMORY_PRIORITY
13696 
13697 #if VMA_EXTERNAL_MEMORY
13698  // Attach VkExportMemoryAllocateInfoKHR if necessary.
13699  VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
13700  exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
13701  if(exportMemoryAllocInfo.handleTypes != 0)
13702  {
13703  VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
13704  }
13705 #endif // #if VMA_EXTERNAL_MEMORY
13706 
13707  VkDeviceMemory mem = VK_NULL_HANDLE;
13708  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13709  if(res < 0)
13710  {
13711  return res;
13712  }
13713 
13714  // New VkDeviceMemory successfully created.
13715 
13716  // Create new Allocation for it.
13717  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13718  pBlock->Init(
13719  m_hAllocator,
13720  m_hParentPool,
13721  m_MemoryTypeIndex,
13722  mem,
13723  allocInfo.allocationSize,
13724  m_NextBlockId++,
13725  m_Algorithm);
13726 
13727  m_Blocks.push_back(pBlock);
13728  if(pNewBlockIndex != VMA_NULL)
13729  {
13730  *pNewBlockIndex = m_Blocks.size() - 1;
13731  }
13732 
13733  return VK_SUCCESS;
13734 }
13735 
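// Descriptive note: CPU-side defragmentation. Maps every block touched by a move
// (remembering which ones were mapped only for defragmentation), memmove()s the
// allocation bytes, and for non-coherent memory invalidates the source range before
// the copy and flushes the destination range after it.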
13736 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13737  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13738  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13739 {
13740  const size_t blockCount = m_Blocks.size();
13741  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13742 
13743  enum BLOCK_FLAG
13744  {
13745  BLOCK_FLAG_USED = 0x00000001,
13746  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13747  };
13748 
13749  struct BlockInfo
13750  {
13751  uint32_t flags;
13752  void* pMappedData;
13753  };
13754  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13755  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13756  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13757 
13758  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13759  const size_t moveCount = moves.size();
13760  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13761  {
13762  const VmaDefragmentationMove& move = moves[moveIndex];
13763  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13764  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13765  }
13766 
13767  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13768 
13769  // Go over all blocks. Get mapped pointer or map if necessary.
13770  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13771  {
13772  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13773  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13774  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13775  {
13776  currBlockInfo.pMappedData = pBlock->GetMappedData();
13777  // If it is not originally mapped, map it.
13778  if(currBlockInfo.pMappedData == VMA_NULL)
13779  {
13780  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13781  if(pDefragCtx->res == VK_SUCCESS)
13782  {
13783  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13784  }
13785  }
13786  }
13787  }
13788 
13789  // Go over all moves. Do actual data transfer.
13790  if(pDefragCtx->res == VK_SUCCESS)
13791  {
13792  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13793  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13794 
13795  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13796  {
13797  const VmaDefragmentationMove& move = moves[moveIndex];
13798 
13799  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13800  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13801 
13802  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13803 
13804  // Invalidate source.
13805  if(isNonCoherent)
13806  {
13807  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13808  memRange.memory = pSrcBlock->GetDeviceMemory();
13809  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13810  memRange.size = VMA_MIN(
13811  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13812  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13813  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13814  }
13815 
13816  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13817  memmove(
13818  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13819  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13820  static_cast<size_t>(move.size));
13821 
13822  if(IsCorruptionDetectionEnabled())
13823  {
13824  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13825  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13826  }
13827 
13828  // Flush destination.
13829  if(isNonCoherent)
13830  {
13831  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13832  memRange.memory = pDstBlock->GetDeviceMemory();
13833  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13834  memRange.size = VMA_MIN(
13835  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13836  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13837  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13838  }
13839  }
13840  }
13841 
13842  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
13843  // This happens regardless of whether pCtx->res == VK_SUCCESS.
13844  for(size_t blockIndex = blockCount; blockIndex--; )
13845  {
13846  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13847  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13848  {
13849  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13850  pBlock->Unmap(m_hAllocator, 1);
13851  }
13852  }
13853 }
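// Worked example (illustrative values) of the invalidate/flush range math used
// above for a non-coherent memory type: with nonCoherentAtomSize = 256, a move
// of size = 600 from srcOffset = 1000 in a 65536-byte block gives
//   memRange.offset = VmaAlignDown(1000, 256) = 768
//   memRange.size   = VMA_MIN(VmaAlignUp(600 + (1000 - 768), 256), 65536 - 768)
//                   = VMA_MIN(1024, 64768) = 1024
// so the mapped range [768, 1792) covers the moved bytes [1000, 1600), is
// aligned as vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges require,
// and is clamped so it never extends past the end of the block.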
13854 
13855 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13856  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13857  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13858  VkCommandBuffer commandBuffer)
13859 {
13860  const size_t blockCount = m_Blocks.size();
13861 
13862  pDefragCtx->blockContexts.resize(blockCount);
13863  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13864 
13865  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13866  const size_t moveCount = moves.size();
13867  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13868  {
13869  const VmaDefragmentationMove& move = moves[moveIndex];
13870 
13871  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13872  {
13873  // Old-school moves still require us to map the whole block.
13874  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13875  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13876  }
13877  }
13878 
13879  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13880 
13881  // Go over all blocks. Create and bind buffer for whole block if necessary.
13882  {
13883  VkBufferCreateInfo bufCreateInfo;
13884  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13885 
13886  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13887  {
13888  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13889  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13890  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13891  {
13892  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13893  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13894  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13895  if(pDefragCtx->res == VK_SUCCESS)
13896  {
13897  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13898  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13899  }
13900  }
13901  }
13902  }
13903 
13904  // Go over all moves. Post data transfer commands to command buffer.
13905  if(pDefragCtx->res == VK_SUCCESS)
13906  {
13907  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13908  {
13909  const VmaDefragmentationMove& move = moves[moveIndex];
13910 
13911  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13912  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13913 
13914  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13915 
13916  VkBufferCopy region = {
13917  move.srcOffset,
13918  move.dstOffset,
13919  move.size };
13920  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13921  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13922  }
13923  }
13924 
13925  // Buffers stay stored in the defrag context for later destruction; mark the context as not ready until the recorded commands have executed.
13926  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13927  {
13928  pDefragCtx->res = VK_NOT_READY;
13929  }
13930 }
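// Note (illustrative): each hBuffer created above aliases an entire
// VkDeviceMemory block, so the VkBufferCopy{srcOffset, dstOffset, size}
// recorded with vkCmdCopyBuffer moves an allocation directly between its old
// and new offsets - e.g. a move with srcOffset = 4096, dstOffset = 0,
// size = 1024 copies bytes [4096, 5120) of the source block to [0, 1024) of
// the destination block. The VK_NOT_READY result signals that the recorded
// commands still have to be submitted and executed before DefragmentationEnd.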
13931 
13932 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13933 {
13934  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13935  {
13936  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13937  if(pBlock->m_pMetadata->IsEmpty())
13938  {
13939  if(m_Blocks.size() > m_MinBlockCount)
13940  {
13941  if(pDefragmentationStats != VMA_NULL)
13942  {
13943  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13944  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13945  }
13946 
13947  VmaVectorRemove(m_Blocks, blockIndex);
13948  pBlock->Destroy(m_hAllocator);
13949  vma_delete(m_hAllocator, pBlock);
13950  }
13951  else
13952  {
13953  break;
13954  }
13955  }
13956  }
13957  UpdateHasEmptyBlock();
13958 }
13959 
13960 void VmaBlockVector::UpdateHasEmptyBlock()
13961 {
13962  m_HasEmptyBlock = false;
13963  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13964  {
13965  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13966  if(pBlock->m_pMetadata->IsEmpty())
13967  {
13968  m_HasEmptyBlock = true;
13969  break;
13970  }
13971  }
13972 }
13973 
13974 #if VMA_STATS_STRING_ENABLED
13975 
13976 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13977 {
13978  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13979 
13980  json.BeginObject();
13981 
13982  if(IsCustomPool())
13983  {
13984  const char* poolName = m_hParentPool->GetName();
13985  if(poolName != VMA_NULL && poolName[0] != '\0')
13986  {
13987  json.WriteString("Name");
13988  json.WriteString(poolName);
13989  }
13990 
13991  json.WriteString("MemoryTypeIndex");
13992  json.WriteNumber(m_MemoryTypeIndex);
13993 
13994  json.WriteString("BlockSize");
13995  json.WriteNumber(m_PreferredBlockSize);
13996 
13997  json.WriteString("BlockCount");
13998  json.BeginObject(true);
13999  if(m_MinBlockCount > 0)
14000  {
14001  json.WriteString("Min");
14002  json.WriteNumber((uint64_t)m_MinBlockCount);
14003  }
14004  if(m_MaxBlockCount < SIZE_MAX)
14005  {
14006  json.WriteString("Max");
14007  json.WriteNumber((uint64_t)m_MaxBlockCount);
14008  }
14009  json.WriteString("Cur");
14010  json.WriteNumber((uint64_t)m_Blocks.size());
14011  json.EndObject();
14012 
14013  if(m_FrameInUseCount > 0)
14014  {
14015  json.WriteString("FrameInUseCount");
14016  json.WriteNumber(m_FrameInUseCount);
14017  }
14018 
14019  if(m_Algorithm != 0)
14020  {
14021  json.WriteString("Algorithm");
14022  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
14023  }
14024  }
14025  else
14026  {
14027  json.WriteString("PreferredBlockSize");
14028  json.WriteNumber(m_PreferredBlockSize);
14029  }
14030 
14031  json.WriteString("Blocks");
14032  json.BeginObject();
14033  for(size_t i = 0; i < m_Blocks.size(); ++i)
14034  {
14035  json.BeginString();
14036  json.ContinueString(m_Blocks[i]->GetId());
14037  json.EndString();
14038 
14039  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
14040  }
14041  json.EndObject();
14042 
14043  json.EndObject();
14044 }
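// Shape of the JSON emitted above for a custom pool (an illustrative sketch;
// actual fields and values depend on the pool configuration):
//
//   {
//     "Name": "MyPool",
//     "MemoryTypeIndex": 2,
//     "BlockSize": 268435456,
//     "BlockCount": { "Min": 1, "Cur": 3 },
//     "Blocks": { "0": { ... }, "1": { ... }, "2": { ... } }
//   }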
14045 
14046 #endif // #if VMA_STATS_STRING_ENABLED
14047 
14048 void VmaBlockVector::Defragment(
14049  class VmaBlockVectorDefragmentationContext* pCtx,
14050  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
14051  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
14052  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
14053  VkCommandBuffer commandBuffer)
14054 {
14055  pCtx->res = VK_SUCCESS;
14056 
14057  const VkMemoryPropertyFlags memPropFlags =
14058  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
14059  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
14060 
14061  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
14062  isHostVisible;
14063  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
14064  !IsCorruptionDetectionEnabled() &&
14065  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
14066 
14067  // There are options to defragment this memory type.
14068  if(canDefragmentOnCpu || canDefragmentOnGpu)
14069  {
14070  bool defragmentOnGpu;
14071  // There is only one option to defragment this memory type.
14072  if(canDefragmentOnGpu != canDefragmentOnCpu)
14073  {
14074  defragmentOnGpu = canDefragmentOnGpu;
14075  }
14076  // Both options are available: use heuristics to choose the better one.
14077  else
14078  {
14079  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
14080  m_hAllocator->IsIntegratedGpu();
14081  }
14082 
14083  bool overlappingMoveSupported = !defragmentOnGpu;
14084 
14085  if(m_hAllocator->m_UseMutex)
14086  {
14087  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14088  {
14089  if(!m_Mutex.TryLockWrite())
14090  {
14091  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
14092  return;
14093  }
14094  }
14095  else
14096  {
14097  m_Mutex.LockWrite();
14098  pCtx->mutexLocked = true;
14099  }
14100  }
14101 
14102  pCtx->Begin(overlappingMoveSupported, flags);
14103 
14104  // Defragment.
14105 
14106  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
14107  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
14108  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
14109 
14110  // Accumulate statistics.
14111  if(pStats != VMA_NULL)
14112  {
14113  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
14114  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
14115  pStats->bytesMoved += bytesMoved;
14116  pStats->allocationsMoved += allocationsMoved;
14117  VMA_ASSERT(bytesMoved <= maxBytesToMove);
14118  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
14119  if(defragmentOnGpu)
14120  {
14121  maxGpuBytesToMove -= bytesMoved;
14122  maxGpuAllocationsToMove -= allocationsMoved;
14123  }
14124  else
14125  {
14126  maxCpuBytesToMove -= bytesMoved;
14127  maxCpuAllocationsToMove -= allocationsMoved;
14128  }
14129  }
14130 
14131  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14132  {
14133  if(m_hAllocator->m_UseMutex)
14134  m_Mutex.UnlockWrite();
14135 
14136  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
14137  pCtx->res = VK_NOT_READY;
14138 
14139  return;
14140  }
14141 
14142  if(pCtx->res >= VK_SUCCESS)
14143  {
14144  if(defragmentOnGpu)
14145  {
14146  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
14147  }
14148  else
14149  {
14150  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
14151  }
14152  }
14153  }
14154 }
14155 
14156 void VmaBlockVector::DefragmentationEnd(
14157  class VmaBlockVectorDefragmentationContext* pCtx,
14158  uint32_t flags,
14159  VmaDefragmentationStats* pStats)
14160 {
14161  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
14162  {
14163  VMA_ASSERT(pCtx->mutexLocked == false);
14164 
14165  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
14166  // lock protecting us. Since we mutate state here, we have to take the lock now.
14167  m_Mutex.LockWrite();
14168  pCtx->mutexLocked = true;
14169  }
14170 
14171  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
14172  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
14173  {
14174  // Destroy buffers.
14175  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
14176  {
14177  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
14178  if(blockCtx.hBuffer)
14179  {
14180  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
14181  }
14182  }
14183 
14184  if(pCtx->res >= VK_SUCCESS)
14185  {
14186  FreeEmptyBlocks(pStats);
14187  }
14188  }
14189 
14190  if(pCtx->mutexLocked)
14191  {
14192  VMA_ASSERT(m_hAllocator->m_UseMutex);
14193  m_Mutex.UnlockWrite();
14194  }
14195 }
14196 
14197 uint32_t VmaBlockVector::ProcessDefragmentations(
14198  class VmaBlockVectorDefragmentationContext *pCtx,
14199  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
14200 {
14201  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14202 
14203  const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
14204 
14205  for(uint32_t i = 0; i < moveCount; ++ i)
14206  {
14207  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
14208 
14209  pMove->allocation = move.hAllocation;
14210  pMove->memory = move.pDstBlock->GetDeviceMemory();
14211  pMove->offset = move.dstOffset;
14212 
14213  ++ pMove;
14214  }
14215 
14216  pCtx->defragmentationMovesProcessed += moveCount;
14217 
14218  return moveCount;
14219 }
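// Application-side sketch of consuming these moves through the public
// incremental API (assumes an allocator and a VmaDefragmentationContext
// created with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL; the copy step is
// application-specific):
//
//   VmaDefragmentationPassMoveInfo moves[64];
//   for(;;)
//   {
//       VmaDefragmentationPassInfo passInfo = {};
//       passInfo.moveCount = 64;
//       passInfo.pMoves = moves;
//       vmaBeginDefragmentationPass(allocator, ctx, &passInfo);
//       // For each of passInfo.moveCount entries, copy the allocation's data
//       // to (moves[i].memory, moves[i].offset) and wait for completion.
//       if(vmaEndDefragmentationPass(allocator, ctx) == VK_SUCCESS)
//           break; // All moves committed.
//   }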
14220 
14221 void VmaBlockVector::CommitDefragmentations(
14222  class VmaBlockVectorDefragmentationContext *pCtx,
14223  VmaDefragmentationStats* pStats)
14224 {
14225  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14226 
14227  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
14228  {
14229  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
14230 
14231  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
14232  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
14233  }
14234 
14235  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
14236  FreeEmptyBlocks(pStats);
14237 }
14238 
14239 size_t VmaBlockVector::CalcAllocationCount() const
14240 {
14241  size_t result = 0;
14242  for(size_t i = 0; i < m_Blocks.size(); ++i)
14243  {
14244  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
14245  }
14246  return result;
14247 }
14248 
14249 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
14250 {
14251  if(m_BufferImageGranularity == 1)
14252  {
14253  return false;
14254  }
14255  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
14256  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
14257  {
14258  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
14259  VMA_ASSERT(m_Algorithm == 0);
14260  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
14261  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
14262  {
14263  return true;
14264  }
14265  }
14266  return false;
14267 }
14268 
14269 void VmaBlockVector::MakePoolAllocationsLost(
14270  uint32_t currentFrameIndex,
14271  size_t* pLostAllocationCount)
14272 {
14273  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14274  size_t lostAllocationCount = 0;
14275  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14276  {
14277  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14278  VMA_ASSERT(pBlock);
14279  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
14280  }
14281  if(pLostAllocationCount != VMA_NULL)
14282  {
14283  *pLostAllocationCount = lostAllocationCount;
14284  }
14285 }
14286 
14287 VkResult VmaBlockVector::CheckCorruption()
14288 {
14289  if(!IsCorruptionDetectionEnabled())
14290  {
14291  return VK_ERROR_FEATURE_NOT_PRESENT;
14292  }
14293 
14294  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
14295  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14296  {
14297  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14298  VMA_ASSERT(pBlock);
14299  VkResult res = pBlock->CheckCorruption(m_hAllocator);
14300  if(res != VK_SUCCESS)
14301  {
14302  return res;
14303  }
14304  }
14305  return VK_SUCCESS;
14306 }
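// Caller-side sketch: this is reached through the public API as
// vmaCheckCorruption, e.g.
//
//   VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
//
// where VK_ERROR_FEATURE_NOT_PRESENT, as above, means corruption detection
// (margins plus VMA_DEBUG_DETECT_CORRUPTION) is not enabled for any requested
// memory type.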
14307 
14308 void VmaBlockVector::AddStats(VmaStats* pStats)
14309 {
14310  const uint32_t memTypeIndex = m_MemoryTypeIndex;
14311  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
14312 
14313  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
14314 
14315  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14316  {
14317  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14318  VMA_ASSERT(pBlock);
14319  VMA_HEAVY_ASSERT(pBlock->Validate());
14320  VmaStatInfo allocationStatInfo;
14321  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
14322  VmaAddStatInfo(pStats->total, allocationStatInfo);
14323  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14324  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14325  }
14326 }
14327 
14328 ////////////////////////////////////////////////////////////////////////////////
14329 // VmaDefragmentationAlgorithm_Generic members definition
14330 
14331 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
14332  VmaAllocator hAllocator,
14333  VmaBlockVector* pBlockVector,
14334  uint32_t currentFrameIndex,
14335  bool overlappingMoveSupported) :
14336  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14337  m_AllocationCount(0),
14338  m_AllAllocations(false),
14339  m_BytesMoved(0),
14340  m_AllocationsMoved(0),
14341  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
14342 {
14343  // Create block info for each block.
14344  const size_t blockCount = m_pBlockVector->m_Blocks.size();
14345  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14346  {
14347  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
14348  pBlockInfo->m_OriginalBlockIndex = blockIndex;
14349  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
14350  m_Blocks.push_back(pBlockInfo);
14351  }
14352 
14353  // Sort them by m_pBlock pointer value.
14354  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
14355 }
14356 
14357 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
14358 {
14359  for(size_t i = m_Blocks.size(); i--; )
14360  {
14361  vma_delete(m_hAllocator, m_Blocks[i]);
14362  }
14363 }
14364 
14365 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14366 {
14367  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
14368  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
14369  {
14370  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
14371  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
14372  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
14373  {
14374  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
14375  (*it)->m_Allocations.push_back(allocInfo);
14376  }
14377  else
14378  {
14379  VMA_ASSERT(0);
14380  }
14381 
14382  ++m_AllocationCount;
14383  }
14384 }
14385 
14386 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
14387  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14388  VkDeviceSize maxBytesToMove,
14389  uint32_t maxAllocationsToMove,
14390  bool freeOldAllocations)
14391 {
14392  if(m_Blocks.empty())
14393  {
14394  return VK_SUCCESS;
14395  }
14396 
14397  // This is a choice based on research.
14398  // Option 1:
14399  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
14400  // Option 2:
14401  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
14402  // Option 3:
14403  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
14404 
14405  size_t srcBlockMinIndex = 0;
14406  // When FAST_ALGORITHM is used, move allocations only from the last of the blocks that contain non-movable allocations.
14407  /*
14408  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
14409  {
14410  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
14411  if(blocksWithNonMovableCount > 0)
14412  {
14413  srcBlockMinIndex = blocksWithNonMovableCount - 1;
14414  }
14415  }
14416  */
14417 
14418  size_t srcBlockIndex = m_Blocks.size() - 1;
14419  size_t srcAllocIndex = SIZE_MAX;
14420  for(;;)
14421  {
14422  // 1. Find next allocation to move.
14423  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
14424  // 1.2. Then start from last to first m_Allocations.
14425  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
14426  {
14427  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
14428  {
14429  // Finished: no more allocations to process.
14430  if(srcBlockIndex == srcBlockMinIndex)
14431  {
14432  return VK_SUCCESS;
14433  }
14434  else
14435  {
14436  --srcBlockIndex;
14437  srcAllocIndex = SIZE_MAX;
14438  }
14439  }
14440  else
14441  {
14442  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
14443  }
14444  }
14445 
14446  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
14447  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
14448 
14449  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
14450  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
14451  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
14452  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
14453 
14454  // 2. Try to find new place for this allocation in preceding or current block.
14455  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
14456  {
14457  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
14458  VmaAllocationRequest dstAllocRequest;
14459  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
14460  m_CurrentFrameIndex,
14461  m_pBlockVector->GetFrameInUseCount(),
14462  m_pBlockVector->GetBufferImageGranularity(),
14463  size,
14464  alignment,
14465  false, // upperAddress
14466  suballocType,
14467  false, // canMakeOtherLost
14468  strategy,
14469  &dstAllocRequest) &&
14470  MoveMakesSense(
14471  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
14472  {
14473  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
14474 
14475  // Reached limit on number of allocations or bytes to move.
14476  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
14477  (m_BytesMoved + size > maxBytesToMove))
14478  {
14479  return VK_SUCCESS;
14480  }
14481 
14482  VmaDefragmentationMove move = {};
14483  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
14484  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
14485  move.srcOffset = srcOffset;
14486  move.dstOffset = dstAllocRequest.offset;
14487  move.size = size;
14488  move.hAllocation = allocInfo.m_hAllocation;
14489  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
14490  move.pDstBlock = pDstBlockInfo->m_pBlock;
14491 
14492  moves.push_back(move);
14493 
14494  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
14495  dstAllocRequest,
14496  suballocType,
14497  size,
14498  allocInfo.m_hAllocation);
14499 
14500  if(freeOldAllocations)
14501  {
14502  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
14503  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
14504  }
14505 
14506  if(allocInfo.m_pChanged != VMA_NULL)
14507  {
14508  *allocInfo.m_pChanged = VK_TRUE;
14509  }
14510 
14511  ++m_AllocationsMoved;
14512  m_BytesMoved += size;
14513 
14514  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
14515 
14516  break;
14517  }
14518  }
14519 
14520  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
14521 
14522  if(srcAllocIndex > 0)
14523  {
14524  --srcAllocIndex;
14525  }
14526  else
14527  {
14528  if(srcBlockIndex > 0)
14529  {
14530  --srcBlockIndex;
14531  srcAllocIndex = SIZE_MAX;
14532  }
14533  else
14534  {
14535  return VK_SUCCESS;
14536  }
14537  }
14538  }
14539 }
14540 
14541 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
14542 {
14543  size_t result = 0;
14544  for(size_t i = 0; i < m_Blocks.size(); ++i)
14545  {
14546  if(m_Blocks[i]->m_HasNonMovableAllocations)
14547  {
14548  ++result;
14549  }
14550  }
14551  return result;
14552 }
14553 
14554 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
14555  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14556  VkDeviceSize maxBytesToMove,
14557  uint32_t maxAllocationsToMove,
14558  VmaDefragmentationFlags flags)
14559 {
14560  if(!m_AllAllocations && m_AllocationCount == 0)
14561  {
14562  return VK_SUCCESS;
14563  }
14564 
14565  const size_t blockCount = m_Blocks.size();
14566  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14567  {
14568  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
14569 
14570  if(m_AllAllocations)
14571  {
14572  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
14573  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
14574  it != pMetadata->m_Suballocations.end();
14575  ++it)
14576  {
14577  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
14578  {
14579  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
14580  pBlockInfo->m_Allocations.push_back(allocInfo);
14581  }
14582  }
14583  }
14584 
14585  pBlockInfo->CalcHasNonMovableAllocations();
14586 
14587  // This is a choice based on research.
14588  // Option 1:
14589  pBlockInfo->SortAllocationsByOffsetDescending();
14590  // Option 2:
14591  //pBlockInfo->SortAllocationsBySizeDescending();
14592  }
14593 
14594  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
14595  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14596 
14597  // This is a choice based on research.
14598  const uint32_t roundCount = 2;
14599 
14600  // Execute defragmentation rounds (the main part).
14601  VkResult result = VK_SUCCESS;
14602  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14603  {
14604  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14605  }
14606 
14607  return result;
14608 }
14609 
14610 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14611  size_t dstBlockIndex, VkDeviceSize dstOffset,
14612  size_t srcBlockIndex, VkDeviceSize srcOffset)
14613 {
14614  if(dstBlockIndex < srcBlockIndex)
14615  {
14616  return true;
14617  }
14618  if(dstBlockIndex > srcBlockIndex)
14619  {
14620  return false;
14621  }
14622  if(dstOffset < srcOffset)
14623  {
14624  return true;
14625  }
14626  return false;
14627 }
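// The comparison above is a strict lexicographic order on (blockIndex, offset):
// a move "makes sense" only if it brings the allocation closer to the front.
// For example (parameters in the order dstBlockIndex, dstOffset, srcBlockIndex, srcOffset):
//   MoveMakesSense(0, 256, 1, 0)   == true   // to an earlier block
//   MoveMakesSense(1, 0, 0, 256)   == false  // never to a later block
//   MoveMakesSense(0, 128, 0, 512) == true   // same block, lower offset
//   MoveMakesSense(0, 512, 0, 128) == false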
14628 
14629 ////////////////////////////////////////////////////////////////////////////////
14630 // VmaDefragmentationAlgorithm_Fast
14631 
14632 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14633  VmaAllocator hAllocator,
14634  VmaBlockVector* pBlockVector,
14635  uint32_t currentFrameIndex,
14636  bool overlappingMoveSupported) :
14637  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14638  m_OverlappingMoveSupported(overlappingMoveSupported),
14639  m_AllocationCount(0),
14640  m_AllAllocations(false),
14641  m_BytesMoved(0),
14642  m_AllocationsMoved(0),
14643  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14644 {
14645  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14646 
14647 }
14648 
14649 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14650 {
14651 }
14652 
14653 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14654  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14655  VkDeviceSize maxBytesToMove,
14656  uint32_t maxAllocationsToMove,
14657  VmaDefragmentationFlags flags)
14658 {
14659  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14660 
14661  const size_t blockCount = m_pBlockVector->GetBlockCount();
14662  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14663  {
14664  return VK_SUCCESS;
14665  }
14666 
14667  PreprocessMetadata();
14668 
14669  // Sort blocks in order from most "destination" to most "source".
14670 
14671  m_BlockInfos.resize(blockCount);
14672  for(size_t i = 0; i < blockCount; ++i)
14673  {
14674  m_BlockInfos[i].origBlockIndex = i;
14675  }
14676 
14677  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14678  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14679  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14680  });
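// Example (illustrative): for three blocks with free sizes {A: 0, B: 4096,
// C: 65536} the sort above yields the order A, B, C - the fullest block
// becomes the most "destination" block and the emptiest the most "source" one.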
14681 
14682  // THE MAIN ALGORITHM
14683 
14684  FreeSpaceDatabase freeSpaceDb;
14685 
14686  size_t dstBlockInfoIndex = 0;
14687  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14688  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14689  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14690  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14691  VkDeviceSize dstOffset = 0;
14692 
14693  bool end = false;
14694  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14695  {
14696  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14697  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14698  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14699  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14700  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14701  {
14702  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14703  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14704  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14705  if(m_AllocationsMoved == maxAllocationsToMove ||
14706  m_BytesMoved + srcAllocSize > maxBytesToMove)
14707  {
14708  end = true;
14709  break;
14710  }
14711  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14712 
14713  VmaDefragmentationMove move = {};
14714  // Try to place it in one of free spaces from the database.
14715  size_t freeSpaceInfoIndex;
14716  VkDeviceSize dstAllocOffset;
14717  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14718  freeSpaceInfoIndex, dstAllocOffset))
14719  {
14720  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14721  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14722  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14723 
14724  // Same block
14725  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14726  {
14727  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14728 
14729  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14730 
14731  VmaSuballocation suballoc = *srcSuballocIt;
14732  suballoc.offset = dstAllocOffset;
14733  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14734  m_BytesMoved += srcAllocSize;
14735  ++m_AllocationsMoved;
14736 
14737  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14738  ++nextSuballocIt;
14739  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14740  srcSuballocIt = nextSuballocIt;
14741 
14742  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14743 
14744  move.srcBlockIndex = srcOrigBlockIndex;
14745  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14746  move.srcOffset = srcAllocOffset;
14747  move.dstOffset = dstAllocOffset;
14748  move.size = srcAllocSize;
14749 
14750  moves.push_back(move);
14751  }
14752  // Different block
14753  else
14754  {
14755  // MOVE OPTION 2: Move the allocation to a different block.
14756 
14757  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14758 
14759  VmaSuballocation suballoc = *srcSuballocIt;
14760  suballoc.offset = dstAllocOffset;
14761  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14762  m_BytesMoved += srcAllocSize;
14763  ++m_AllocationsMoved;
14764 
14765  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14766  ++nextSuballocIt;
14767  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14768  srcSuballocIt = nextSuballocIt;
14769 
14770  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14771 
14772  move.srcBlockIndex = srcOrigBlockIndex;
14773  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14774  move.srcOffset = srcAllocOffset;
14775  move.dstOffset = dstAllocOffset;
14776  move.size = srcAllocSize;
14777 
14778  moves.push_back(move);
14779  }
14780  }
14781  else
14782  {
14783  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14784 
14785  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
14786  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14787  dstAllocOffset + srcAllocSize > dstBlockSize)
14788  {
14789  // But before that, register remaining free space at the end of dst block.
14790  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14791 
14792  ++dstBlockInfoIndex;
14793  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14794  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14795  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14796  dstBlockSize = pDstMetadata->GetSize();
14797  dstOffset = 0;
14798  dstAllocOffset = 0;
14799  }
14800 
14801  // Same block
14802  if(dstBlockInfoIndex == srcBlockInfoIndex)
14803  {
14804  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14805 
14806  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14807 
14808  bool skipOver = overlap;
14809  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14810  {
14811  // If the destination and source places overlap, skip the move if it
14812  // would shift the allocation by less than 1/64 of its size.
14813  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14814  }
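 // Worked example: for srcAllocSize = 65536 the move is performed only if
 // (srcAllocOffset - dstAllocOffset) * 64 >= 65536, i.e. only if the
 // allocation would travel at least 1024 bytes towards the front.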
14815 
14816  if(skipOver)
14817  {
14818  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14819 
14820  dstOffset = srcAllocOffset + srcAllocSize;
14821  ++srcSuballocIt;
14822  }
14823  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14824  else
14825  {
14826  srcSuballocIt->offset = dstAllocOffset;
14827  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14828  dstOffset = dstAllocOffset + srcAllocSize;
14829  m_BytesMoved += srcAllocSize;
14830  ++m_AllocationsMoved;
14831  ++srcSuballocIt;
14832 
14833  move.srcBlockIndex = srcOrigBlockIndex;
14834  move.dstBlockIndex = dstOrigBlockIndex;
14835  move.srcOffset = srcAllocOffset;
14836  move.dstOffset = dstAllocOffset;
14837  move.size = srcAllocSize;
14838 
14839  moves.push_back(move);
14840  }
14841  }
14842  // Different block
14843  else
14844  {
14845  // MOVE OPTION 2: Move the allocation to a different block.
14846 
14847  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14848  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14849 
14850  VmaSuballocation suballoc = *srcSuballocIt;
14851  suballoc.offset = dstAllocOffset;
14852  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14853  dstOffset = dstAllocOffset + srcAllocSize;
14854  m_BytesMoved += srcAllocSize;
14855  ++m_AllocationsMoved;
14856 
14857  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14858  ++nextSuballocIt;
14859  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14860  srcSuballocIt = nextSuballocIt;
14861 
14862  pDstMetadata->m_Suballocations.push_back(suballoc);
14863 
14864  move.srcBlockIndex = srcOrigBlockIndex;
14865  move.dstBlockIndex = dstOrigBlockIndex;
14866  move.srcOffset = srcAllocOffset;
14867  move.dstOffset = dstAllocOffset;
14868  move.size = srcAllocSize;
14869 
14870  moves.push_back(move);
14871  }
14872  }
14873  }
14874  }
14875 
14876  m_BlockInfos.clear();
14877 
14878  PostprocessMetadata();
14879 
14880  return VK_SUCCESS;
14881 }
14882 
14883 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14884 {
14885  const size_t blockCount = m_pBlockVector->GetBlockCount();
14886  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14887  {
14888  VmaBlockMetadata_Generic* const pMetadata =
14889  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14890  pMetadata->m_FreeCount = 0;
14891  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14892  pMetadata->m_FreeSuballocationsBySize.clear();
14893  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14894  it != pMetadata->m_Suballocations.end(); )
14895  {
14896  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14897  {
14898  VmaSuballocationList::iterator nextIt = it;
14899  ++nextIt;
14900  pMetadata->m_Suballocations.erase(it);
14901  it = nextIt;
14902  }
14903  else
14904  {
14905  ++it;
14906  }
14907  }
14908  }
14909 }
14910 
14911 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14912 {
14913  const size_t blockCount = m_pBlockVector->GetBlockCount();
14914  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14915  {
14916  VmaBlockMetadata_Generic* const pMetadata =
14917  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14918  const VkDeviceSize blockSize = pMetadata->GetSize();
14919 
14920  // No allocations in this block - entire area is free.
14921  if(pMetadata->m_Suballocations.empty())
14922  {
14923  pMetadata->m_FreeCount = 1;
14924  //pMetadata->m_SumFreeSize is already set to blockSize.
14925  VmaSuballocation suballoc = {
14926  0, // offset
14927  blockSize, // size
14928  VMA_NULL, // hAllocation
14929  VMA_SUBALLOCATION_TYPE_FREE };
14930  pMetadata->m_Suballocations.push_back(suballoc);
14931  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14932  }
14933  // There are some allocations in this block.
14934  else
14935  {
14936  VkDeviceSize offset = 0;
14937  VmaSuballocationList::iterator it;
14938  for(it = pMetadata->m_Suballocations.begin();
14939  it != pMetadata->m_Suballocations.end();
14940  ++it)
14941  {
14942  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14943  VMA_ASSERT(it->offset >= offset);
14944 
14945  // Need to insert preceding free space.
14946  if(it->offset > offset)
14947  {
14948  ++pMetadata->m_FreeCount;
14949  const VkDeviceSize freeSize = it->offset - offset;
14950  VmaSuballocation suballoc = {
14951  offset, // offset
14952  freeSize, // size
14953  VMA_NULL, // hAllocation
14954  VMA_SUBALLOCATION_TYPE_FREE };
14955  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14956  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14957  {
14958  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14959  }
14960  }
14961 
14962  pMetadata->m_SumFreeSize -= it->size;
14963  offset = it->offset + it->size;
14964  }
14965 
14966  // Need to insert trailing free space.
14967  if(offset < blockSize)
14968  {
14969  ++pMetadata->m_FreeCount;
14970  const VkDeviceSize freeSize = blockSize - offset;
14971  VmaSuballocation suballoc = {
14972  offset, // offset
14973  freeSize, // size
14974  VMA_NULL, // hAllocation
14975  VMA_SUBALLOCATION_TYPE_FREE };
14976  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14977  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14978  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14979  {
14980  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14981  }
14982  }
14983 
14984  VMA_SORT(
14985  pMetadata->m_FreeSuballocationsBySize.begin(),
14986  pMetadata->m_FreeSuballocationsBySize.end(),
14987  VmaSuballocationItemSizeLess());
14988  }
14989 
14990  VMA_HEAVY_ASSERT(pMetadata->Validate());
14991  }
14992 }
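// Worked example of the reconstruction above: for a 1024-byte block whose
// remaining suballocations are [0, 256) and [512, 640), the loop inserts a
// free suballocation [256, 512) (size 256) and a trailing one [640, 1024)
// (size 384), leaving m_FreeCount = 2 and m_SumFreeSize = 1024 - 256 - 128 = 640
// before m_FreeSuballocationsBySize is re-sorted by size.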
14993 
14994 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14995 {
14996  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14997  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14998  // Advance to the first suballocation placed at or after suballoc.offset.
14999  // The offset test belongs in the loop condition: with it only inside the
15000  // body, the loop would never terminate upon reaching such an element.
15001  while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
15002  {
15003  ++it;
15004  }
15005  pMetadata->m_Suballocations.insert(it, suballoc);
15006 }
15007 
15008 ////////////////////////////////////////////////////////////////////////////////
15009 // VmaBlockVectorDefragmentationContext
15010 
15011 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
15012  VmaAllocator hAllocator,
15013  VmaPool hCustomPool,
15014  VmaBlockVector* pBlockVector,
15015  uint32_t currFrameIndex) :
15016  res(VK_SUCCESS),
15017  mutexLocked(false),
15018  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
15019  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
15020  defragmentationMovesProcessed(0),
15021  defragmentationMovesCommitted(0),
15022  hasDefragmentationPlan(0),
15023  m_hAllocator(hAllocator),
15024  m_hCustomPool(hCustomPool),
15025  m_pBlockVector(pBlockVector),
15026  m_CurrFrameIndex(currFrameIndex),
15027  m_pAlgorithm(VMA_NULL),
15028  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
15029  m_AllAllocations(false)
15030 {
15031 }
15032 
15033 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
15034 {
15035  vma_delete(m_hAllocator, m_pAlgorithm);
15036 }
15037 
15038 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
15039 {
15040  AllocInfo info = { hAlloc, pChanged };
15041  m_Allocations.push_back(info);
15042 }
15043 
15044 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
15045 {
15046  const bool allAllocations = m_AllAllocations ||
15047  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
15048 
15049  /********************************
15050  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
15051  ********************************/
15052 
15053  /*
15054  Fast algorithm is supported only when certain criteria are met:
15055  - VMA_DEBUG_MARGIN is 0.
15056  - All allocations in this block vector are movable.
15057  - There is no possibility of image/buffer granularity conflict.
15058  - The defragmentation is not incremental.
15059  */
15060  if(VMA_DEBUG_MARGIN == 0 &&
15061  allAllocations &&
15062  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
15063  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
15064  {
15065  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
15066  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
15067  }
15068  else
15069  {
15070  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
15071  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
15072  }
15073 
15074  if(allAllocations)
15075  {
15076  m_pAlgorithm->AddAll();
15077  }
15078  else
15079  {
15080  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
15081  {
15082  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
15083  }
15084  }
15085 }
15086 
15087 ////////////////////////////////////////////////////////////////////////////////
15088 // VmaDefragmentationContext
15089 
15090 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
15091  VmaAllocator hAllocator,
15092  uint32_t currFrameIndex,
15093  uint32_t flags,
15094  VmaDefragmentationStats* pStats) :
15095  m_hAllocator(hAllocator),
15096  m_CurrFrameIndex(currFrameIndex),
15097  m_Flags(flags),
15098  m_pStats(pStats),
15099  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
15100 {
15101  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
15102 }
15103 
15104 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
15105 {
15106  for(size_t i = m_CustomPoolContexts.size(); i--; )
15107  {
15108  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
15109  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
15110  vma_delete(m_hAllocator, pBlockVectorCtx);
15111  }
15112  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
15113  {
15114  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
15115  if(pBlockVectorCtx)
15116  {
15117  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
15118  vma_delete(m_hAllocator, pBlockVectorCtx);
15119  }
15120  }
15121 }
15122 
15123 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
15124 {
15125  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15126  {
15127  VmaPool pool = pPools[poolIndex];
15128  VMA_ASSERT(pool);
15129  // Pools with algorithm other than default are not defragmented.
15130  if(pool->m_BlockVector.GetAlgorithm() == 0)
15131  {
15132  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
15133 
15134  for(size_t i = m_CustomPoolContexts.size(); i--; )
15135  {
15136  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
15137  {
15138  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
15139  break;
15140  }
15141  }
15142 
15143  if(!pBlockVectorDefragCtx)
15144  {
15145  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15146  m_hAllocator,
15147  pool,
15148  &pool->m_BlockVector,
15149  m_CurrFrameIndex);
15150  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
15151  }
15152 
15153  pBlockVectorDefragCtx->AddAll();
15154  }
15155  }
15156 }
15157 
15158 void VmaDefragmentationContext_T::AddAllocations(
15159  uint32_t allocationCount,
15160  const VmaAllocation* pAllocations,
15161  VkBool32* pAllocationsChanged)
15162 {
15163  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
15164  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15165  {
15166  const VmaAllocation hAlloc = pAllocations[allocIndex];
15167  VMA_ASSERT(hAlloc);
15168  // DedicatedAlloc cannot be defragmented.
15169  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
15170  // Lost allocation cannot be defragmented.
15171  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
15172  {
15173  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
15174 
15175  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
15176  // This allocation belongs to custom pool.
15177  if(hAllocPool != VK_NULL_HANDLE)
15178  {
15179  // Pools with algorithm other than default are not defragmented.
15180  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
15181  {
15182  for(size_t i = m_CustomPoolContexts.size(); i--; )
15183  {
15184  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
15185  {
15186  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
15187  break;
15188  }
15189  }
15190  if(!pBlockVectorDefragCtx)
15191  {
15192  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15193  m_hAllocator,
15194  hAllocPool,
15195  &hAllocPool->m_BlockVector,
15196  m_CurrFrameIndex);
15197  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
15198  }
15199  }
15200  }
15201  // This allocation belongs to default pool.
15202  else
15203  {
15204  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
15205  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
15206  if(!pBlockVectorDefragCtx)
15207  {
15208  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15209  m_hAllocator,
15210  VMA_NULL, // hCustomPool
15211  m_hAllocator->m_pBlockVectors[memTypeIndex],
15212  m_CurrFrameIndex);
15213  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
15214  }
15215  }
15216 
15217  if(pBlockVectorDefragCtx)
15218  {
15219  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
15220  &pAllocationsChanged[allocIndex] : VMA_NULL;
15221  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
15222  }
15223  }
15224  }
15225 }
15226 
15227 VkResult VmaDefragmentationContext_T::Defragment(
15228  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
15229  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
15230  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
15231 {
15232  if(pStats)
15233  {
15234  memset(pStats, 0, sizeof(VmaDefragmentationStats));
15235  }
15236 
15237  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
15238  {
15239  // For incremental defragmentations we just earmark how much we can move;
15240  // the real meat is in the defragmentation passes.
15241  m_MaxCpuBytesToMove = maxCpuBytesToMove;
15242  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
15243 
15244  m_MaxGpuBytesToMove = maxGpuBytesToMove;
15245  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
15246 
15247  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
15248  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
15249  return VK_SUCCESS;
15250 
15251  return VK_NOT_READY;
15252  }
15253 
15254  if(commandBuffer == VK_NULL_HANDLE)
15255  {
15256  maxGpuBytesToMove = 0;
15257  maxGpuAllocationsToMove = 0;
15258  }
15259 
15260  VkResult res = VK_SUCCESS;
15261 
15262  // Process default pools.
15263  for(uint32_t memTypeIndex = 0;
15264  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
15265  ++memTypeIndex)
15266  {
15267  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15268  if(pBlockVectorCtx)
15269  {
15270  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15271  pBlockVectorCtx->GetBlockVector()->Defragment(
15272  pBlockVectorCtx,
15273  pStats, flags,
15274  maxCpuBytesToMove, maxCpuAllocationsToMove,
15275  maxGpuBytesToMove, maxGpuAllocationsToMove,
15276  commandBuffer);
15277  if(pBlockVectorCtx->res != VK_SUCCESS)
15278  {
15279  res = pBlockVectorCtx->res;
15280  }
15281  }
15282  }
15283 
15284  // Process custom pools.
15285  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15286  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
15287  ++customCtxIndex)
15288  {
15289  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15290  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15291  pBlockVectorCtx->GetBlockVector()->Defragment(
15292  pBlockVectorCtx,
15293  pStats, flags,
15294  maxCpuBytesToMove, maxCpuAllocationsToMove,
15295  maxGpuBytesToMove, maxGpuAllocationsToMove,
15296  commandBuffer);
15297  if(pBlockVectorCtx->res != VK_SUCCESS)
15298  {
15299  res = pBlockVectorCtx->res;
15300  }
15301  }
15302 
15303  return res;
15304 }
15305 
15306 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
15307 {
15308  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
15309  uint32_t movesLeft = pInfo->moveCount;
15310 
15311  // Process default pools.
15312  for(uint32_t memTypeIndex = 0;
15313  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15314  ++memTypeIndex)
15315  {
15316  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15317  if(pBlockVectorCtx)
15318  {
15319  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15320 
15321  if(!pBlockVectorCtx->hasDefragmentationPlan)
15322  {
15323  pBlockVectorCtx->GetBlockVector()->Defragment(
15324  pBlockVectorCtx,
15325  m_pStats, m_Flags,
15326  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15327  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15328  VK_NULL_HANDLE);
15329 
15330  if(pBlockVectorCtx->res < VK_SUCCESS)
15331  continue;
15332 
15333  pBlockVectorCtx->hasDefragmentationPlan = true;
15334  }
15335 
15336  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15337  pBlockVectorCtx,
15338  pCurrentMove, movesLeft);
15339 
15340  movesLeft -= processed;
15341  pCurrentMove += processed;
15342  }
15343  }
15344 
15345  // Process custom pools.
15346  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15347  customCtxIndex < customCtxCount;
15348  ++customCtxIndex)
15349  {
15350  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15351  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15352 
15353  if(!pBlockVectorCtx->hasDefragmentationPlan)
15354  {
15355  pBlockVectorCtx->GetBlockVector()->Defragment(
15356  pBlockVectorCtx,
15357  m_pStats, m_Flags,
15358  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15359  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15360  VK_NULL_HANDLE);
15361 
15362  if(pBlockVectorCtx->res < VK_SUCCESS)
15363  continue;
15364 
15365  pBlockVectorCtx->hasDefragmentationPlan = true;
15366  }
15367 
15368  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15369  pBlockVectorCtx,
15370  pCurrentMove, movesLeft);
15371 
15372  movesLeft -= processed;
15373  pCurrentMove += processed;
15374  }
15375 
15376  pInfo->moveCount = pInfo->moveCount - movesLeft;
15377 
15378  return VK_SUCCESS;
15379 }
15380 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
15381 {
15382  VkResult res = VK_SUCCESS;
15383 
15384  // Process default pools.
15385  for(uint32_t memTypeIndex = 0;
15386  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15387  ++memTypeIndex)
15388  {
15389  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15390  if(pBlockVectorCtx)
15391  {
15392  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15393 
15394  if(!pBlockVectorCtx->hasDefragmentationPlan)
15395  {
15396  res = VK_NOT_READY;
15397  continue;
15398  }
15399 
15400  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15401  pBlockVectorCtx, m_pStats);
15402 
15403  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15404  res = VK_NOT_READY;
15405  }
15406  }
15407 
15408  // Process custom pools.
15409  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15410  customCtxIndex < customCtxCount;
15411  ++customCtxIndex)
15412  {
15413  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15414  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15415 
15416  if(!pBlockVectorCtx->hasDefragmentationPlan)
15417  {
15418  res = VK_NOT_READY;
15419  continue;
15420  }
15421 
15422  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15423  pBlockVectorCtx, m_pStats);
15424 
15425  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15426  res = VK_NOT_READY;
15427  }
15428 
15429  return res;
15430 }
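/*
Editor's sketch (not part of the original source): how an application can drive
the pass functions above through the public API. The copy helper
RecreateAndCopy() is hypothetical; vmaBeginDefragmentationPass() fills pMoves
and rewrites moveCount, and vmaEndDefragmentationPass() returns VK_NOT_READY
while further passes are still required.

    VmaDefragmentationPassMoveInfo moves[64] = {};
    for(;;)
    {
        VmaDefragmentationPassInfo pass = {};
        pass.moveCount = 64;
        pass.pMoves = moves;
        vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
        for(uint32_t i = 0; i < pass.moveCount; ++i)
        {
            // Recreate the resource and copy its data to moves[i].memory at moves[i].offset.
            RecreateAndCopy(moves[i].allocation, moves[i].memory, moves[i].offset);
        }
        if(vmaEndDefragmentationPass(allocator, defragCtx) != VK_NOT_READY)
            break; // All moves committed.
    }
*/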
15431 
15432 ////////////////////////////////////////////////////////////////////////////////
15433 // VmaRecorder
15434 
15435 #if VMA_RECORDING_ENABLED
15436 
15437 VmaRecorder::VmaRecorder() :
15438  m_UseMutex(true),
15439  m_Flags(0),
15440  m_File(VMA_NULL),
15441  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
15442 {
15443 }
15444 
15445 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
15446 {
15447  m_UseMutex = useMutex;
15448  m_Flags = settings.flags;
15449 
15450 #if defined(_WIN32)
15451  // Open file for writing.
15452  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
15453 
15454  if(err != 0)
15455  {
15456  return VK_ERROR_INITIALIZATION_FAILED;
15457  }
15458 #else
15459  // Open file for writing.
15460  m_File = fopen(settings.pFilePath, "wb");
15461 
15462  if(m_File == VMA_NULL)
15463  {
15464  return VK_ERROR_INITIALIZATION_FAILED;
15465  }
15466 #endif
15467 
15468  // Write header.
15469  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
15470  fprintf(m_File, "%s\n", "1,8");
15471 
15472  return VK_SUCCESS;
15473 }
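// Editor's note (illustrative): given the header written above and the
// WriteConfiguration()/RecordCreateAllocator() calls made during allocator
// creation, a fresh recording file begins like this (CSV, format version 1,8):
//
//     Vulkan Memory Allocator,Calls recording
//     1,8
//     Config,Begin
//     ...
//     Config,End
//     <threadId>,<seconds>,<frameIndex>,vmaCreateAllocator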
15474 
15475 VmaRecorder::~VmaRecorder()
15476 {
15477  if(m_File != VMA_NULL)
15478  {
15479  fclose(m_File);
15480  }
15481 }
15482 
15483 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
15484 {
15485  CallParams callParams;
15486  GetBasicParams(callParams);
15487 
15488  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15489  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
15490  Flush();
15491 }
15492 
15493 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
15494 {
15495  CallParams callParams;
15496  GetBasicParams(callParams);
15497 
15498  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15499  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
15500  Flush();
15501 }
15502 
15503 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
15504 {
15505  CallParams callParams;
15506  GetBasicParams(callParams);
15507 
15508  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15509  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
15510  createInfo.memoryTypeIndex,
15511  createInfo.flags,
15512  createInfo.blockSize,
15513  (uint64_t)createInfo.minBlockCount,
15514  (uint64_t)createInfo.maxBlockCount,
15515  createInfo.frameInUseCount,
15516  pool);
15517  Flush();
15518 }
15519 
15520 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
15521 {
15522  CallParams callParams;
15523  GetBasicParams(callParams);
15524 
15525  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15526  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
15527  pool);
15528  Flush();
15529 }
15530 
15531 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
15532  const VkMemoryRequirements& vkMemReq,
15533  const VmaAllocationCreateInfo& createInfo,
15534  VmaAllocation allocation)
15535 {
15536  CallParams callParams;
15537  GetBasicParams(callParams);
15538 
15539  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15540  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15541  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15542  vkMemReq.size,
15543  vkMemReq.alignment,
15544  vkMemReq.memoryTypeBits,
15545  createInfo.flags,
15546  createInfo.usage,
15547  createInfo.requiredFlags,
15548  createInfo.preferredFlags,
15549  createInfo.memoryTypeBits,
15550  createInfo.pool,
15551  allocation,
15552  userDataStr.GetString());
15553  Flush();
15554 }
15555 
15556 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
15557  const VkMemoryRequirements& vkMemReq,
15558  const VmaAllocationCreateInfo& createInfo,
15559  uint64_t allocationCount,
15560  const VmaAllocation* pAllocations)
15561 {
15562  CallParams callParams;
15563  GetBasicParams(callParams);
15564 
15565  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15566  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15567  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
15568  vkMemReq.size,
15569  vkMemReq.alignment,
15570  vkMemReq.memoryTypeBits,
15571  createInfo.flags,
15572  createInfo.usage,
15573  createInfo.requiredFlags,
15574  createInfo.preferredFlags,
15575  createInfo.memoryTypeBits,
15576  createInfo.pool);
15577  PrintPointerList(allocationCount, pAllocations);
15578  fprintf(m_File, ",%s\n", userDataStr.GetString());
15579  Flush();
15580 }
15581 
15582 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
15583  const VkMemoryRequirements& vkMemReq,
15584  bool requiresDedicatedAllocation,
15585  bool prefersDedicatedAllocation,
15586  const VmaAllocationCreateInfo& createInfo,
15587  VmaAllocation allocation)
15588 {
15589  CallParams callParams;
15590  GetBasicParams(callParams);
15591 
15592  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15593  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15594  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15595  vkMemReq.size,
15596  vkMemReq.alignment,
15597  vkMemReq.memoryTypeBits,
15598  requiresDedicatedAllocation ? 1 : 0,
15599  prefersDedicatedAllocation ? 1 : 0,
15600  createInfo.flags,
15601  createInfo.usage,
15602  createInfo.requiredFlags,
15603  createInfo.preferredFlags,
15604  createInfo.memoryTypeBits,
15605  createInfo.pool,
15606  allocation,
15607  userDataStr.GetString());
15608  Flush();
15609 }
15610 
15611 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15612  const VkMemoryRequirements& vkMemReq,
15613  bool requiresDedicatedAllocation,
15614  bool prefersDedicatedAllocation,
15615  const VmaAllocationCreateInfo& createInfo,
15616  VmaAllocation allocation)
15617 {
15618  CallParams callParams;
15619  GetBasicParams(callParams);
15620 
15621  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15622  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15623  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15624  vkMemReq.size,
15625  vkMemReq.alignment,
15626  vkMemReq.memoryTypeBits,
15627  requiresDedicatedAllocation ? 1 : 0,
15628  prefersDedicatedAllocation ? 1 : 0,
15629  createInfo.flags,
15630  createInfo.usage,
15631  createInfo.requiredFlags,
15632  createInfo.preferredFlags,
15633  createInfo.memoryTypeBits,
15634  createInfo.pool,
15635  allocation,
15636  userDataStr.GetString());
15637  Flush();
15638 }
15639 
15640 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15641  VmaAllocation allocation)
15642 {
15643  CallParams callParams;
15644  GetBasicParams(callParams);
15645 
15646  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15647  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15648  allocation);
15649  Flush();
15650 }
15651 
15652 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15653  uint64_t allocationCount,
15654  const VmaAllocation* pAllocations)
15655 {
15656  CallParams callParams;
15657  GetBasicParams(callParams);
15658 
15659  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15660  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15661  PrintPointerList(allocationCount, pAllocations);
15662  fprintf(m_File, "\n");
15663  Flush();
15664 }
15665 
15666 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15667  VmaAllocation allocation,
15668  const void* pUserData)
15669 {
15670  CallParams callParams;
15671  GetBasicParams(callParams);
15672 
15673  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15674  UserDataString userDataStr(
15675  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15676  pUserData);
15677  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15678  allocation,
15679  userDataStr.GetString());
15680  Flush();
15681 }
15682 
15683 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15684  VmaAllocation allocation)
15685 {
15686  CallParams callParams;
15687  GetBasicParams(callParams);
15688 
15689  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15690  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15691  allocation);
15692  Flush();
15693 }
15694 
15695 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15696  VmaAllocation allocation)
15697 {
15698  CallParams callParams;
15699  GetBasicParams(callParams);
15700 
15701  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15702  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15703  allocation);
15704  Flush();
15705 }
15706 
15707 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15708  VmaAllocation allocation)
15709 {
15710  CallParams callParams;
15711  GetBasicParams(callParams);
15712 
15713  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15714  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15715  allocation);
15716  Flush();
15717 }
15718 
15719 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15720  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15721 {
15722  CallParams callParams;
15723  GetBasicParams(callParams);
15724 
15725  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15726  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15727  allocation,
15728  offset,
15729  size);
15730  Flush();
15731 }
15732 
15733 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15734  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15735 {
15736  CallParams callParams;
15737  GetBasicParams(callParams);
15738 
15739  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15740  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15741  allocation,
15742  offset,
15743  size);
15744  Flush();
15745 }
15746 
15747 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15748  const VkBufferCreateInfo& bufCreateInfo,
15749  const VmaAllocationCreateInfo& allocCreateInfo,
15750  VmaAllocation allocation)
15751 {
15752  CallParams callParams;
15753  GetBasicParams(callParams);
15754 
15755  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15756  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15757  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15758  bufCreateInfo.flags,
15759  bufCreateInfo.size,
15760  bufCreateInfo.usage,
15761  bufCreateInfo.sharingMode,
15762  allocCreateInfo.flags,
15763  allocCreateInfo.usage,
15764  allocCreateInfo.requiredFlags,
15765  allocCreateInfo.preferredFlags,
15766  allocCreateInfo.memoryTypeBits,
15767  allocCreateInfo.pool,
15768  allocation,
15769  userDataStr.GetString());
15770  Flush();
15771 }
15772 
15773 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15774  const VkImageCreateInfo& imageCreateInfo,
15775  const VmaAllocationCreateInfo& allocCreateInfo,
15776  VmaAllocation allocation)
15777 {
15778  CallParams callParams;
15779  GetBasicParams(callParams);
15780 
15781  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15782  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15783  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15784  imageCreateInfo.flags,
15785  imageCreateInfo.imageType,
15786  imageCreateInfo.format,
15787  imageCreateInfo.extent.width,
15788  imageCreateInfo.extent.height,
15789  imageCreateInfo.extent.depth,
15790  imageCreateInfo.mipLevels,
15791  imageCreateInfo.arrayLayers,
15792  imageCreateInfo.samples,
15793  imageCreateInfo.tiling,
15794  imageCreateInfo.usage,
15795  imageCreateInfo.sharingMode,
15796  imageCreateInfo.initialLayout,
15797  allocCreateInfo.flags,
15798  allocCreateInfo.usage,
15799  allocCreateInfo.requiredFlags,
15800  allocCreateInfo.preferredFlags,
15801  allocCreateInfo.memoryTypeBits,
15802  allocCreateInfo.pool,
15803  allocation,
15804  userDataStr.GetString());
15805  Flush();
15806 }
15807 
15808 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15809  VmaAllocation allocation)
15810 {
15811  CallParams callParams;
15812  GetBasicParams(callParams);
15813 
15814  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15815  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15816  allocation);
15817  Flush();
15818 }
15819 
15820 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15821  VmaAllocation allocation)
15822 {
15823  CallParams callParams;
15824  GetBasicParams(callParams);
15825 
15826  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15827  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15828  allocation);
15829  Flush();
15830 }
15831 
15832 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15833  VmaAllocation allocation)
15834 {
15835  CallParams callParams;
15836  GetBasicParams(callParams);
15837 
15838  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15839  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15840  allocation);
15841  Flush();
15842 }
15843 
15844 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15845  VmaAllocation allocation)
15846 {
15847  CallParams callParams;
15848  GetBasicParams(callParams);
15849 
15850  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15851  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15852  allocation);
15853  Flush();
15854 }
15855 
15856 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15857  VmaPool pool)
15858 {
15859  CallParams callParams;
15860  GetBasicParams(callParams);
15861 
15862  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15863  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15864  pool);
15865  Flush();
15866 }
15867 
15868 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15869  const VmaDefragmentationInfo2& info,
15870  VmaDefragmentationContext ctx)
15871 {
15872  CallParams callParams;
15873  GetBasicParams(callParams);
15874 
15875  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15876  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15877  info.flags);
15878  PrintPointerList(info.allocationCount, info.pAllocations);
15879  fprintf(m_File, ",");
15880  PrintPointerList(info.poolCount, info.pPools);
15881  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15882  info.maxCpuBytesToMove,
15883  info.maxCpuAllocationsToMove,
15884  info.maxGpuBytesToMove,
15885  info.maxGpuAllocationsToMove,
15886  info.commandBuffer,
15887  ctx);
15888  Flush();
15889 }
15890 
15891 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15892  VmaDefragmentationContext ctx)
15893 {
15894  CallParams callParams;
15895  GetBasicParams(callParams);
15896 
15897  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15898  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15899  ctx);
15900  Flush();
15901 }
15902 
15903 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15904  VmaPool pool,
15905  const char* name)
15906 {
15907  CallParams callParams;
15908  GetBasicParams(callParams);
15909 
15910  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15911  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15912  pool, name != VMA_NULL ? name : "");
15913  Flush();
15914 }
15915 
15916 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15917 {
15918  if(pUserData != VMA_NULL)
15919  {
15920  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15921  {
15922  m_Str = (const char*)pUserData;
15923  }
15924  else
15925  {
15926  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, pUserData is an opaque pointer, so render its address as the string.
15927  snprintf(m_PtrStr, 17, "%p", pUserData);
15928  m_Str = m_PtrStr;
15929  }
15930  }
15931  else
15932  {
15933  m_Str = "";
15934  }
15935 }
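// Worked example (editor's addition): with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set and pUserData pointing at
// "TerrainVB", GetString() returns "TerrainVB"; without the flag the same
// pointer is rendered as its address, e.g. "0x7f3a5c001230" (value made up).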
15936 
15937 void VmaRecorder::WriteConfiguration(
15938  const VkPhysicalDeviceProperties& devProps,
15939  const VkPhysicalDeviceMemoryProperties& memProps,
15940  uint32_t vulkanApiVersion,
15941  bool dedicatedAllocationExtensionEnabled,
15942  bool bindMemory2ExtensionEnabled,
15943  bool memoryBudgetExtensionEnabled,
15944  bool deviceCoherentMemoryExtensionEnabled)
15945 {
15946  fprintf(m_File, "Config,Begin\n");
15947 
15948  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15949 
15950  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15951  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15952  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15953  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15954  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15955  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15956 
15957  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15958  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15959  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15960 
15961  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15962  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15963  {
15964  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15965  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15966  }
15967  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15968  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15969  {
15970  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15971  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15972  }
15973 
15974  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15975  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15976  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15977  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15978 
15979  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15980  fprintf(m_File, "Macro,VMA_MIN_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_MIN_ALIGNMENT);
15981  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15982  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15983  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15984  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15985  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15986  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15987  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15988 
15989  fprintf(m_File, "Config,End\n");
15990 }
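/*
Editor's sample (illustrative, device values made up) of the Config section this
function emits:

    Config,Begin
    VulkanApiVersion,1,1
    PhysicalDevice,deviceName,Example GPU
    PhysicalDeviceMemory,HeapCount,2
    PhysicalDeviceMemory,Heap,0,size,8589934592
    Extension,VK_EXT_memory_budget,1
    Macro,VMA_DEBUG_MARGIN,0
    Config,End
*/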
15991 
15992 void VmaRecorder::GetBasicParams(CallParams& outParams)
15993 {
15994  #if defined(_WIN32)
15995  outParams.threadId = GetCurrentThreadId();
15996  #else
15997  // Use C++11 features to get thread id and convert it to uint32_t.
15998  // There is room for optimization since sstream is quite slow.
15999  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
16000  std::thread::id thread_id = std::this_thread::get_id();
16001  std::stringstream thread_id_to_string_converter;
16002  thread_id_to_string_converter << thread_id;
16003  std::string thread_id_as_string = thread_id_to_string_converter.str();
16004  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
16005  #endif
16006 
16007  auto current_time = std::chrono::high_resolution_clock::now();
16008 
16009  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
16010 }
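/*
Editor's sketch answering the question in the comment above: std::hash avoids
the stringstream round-trip. An alternative, not the code used here:

    outParams.threadId = static_cast<uint32_t>(
        std::hash<std::thread::id>()(std::this_thread::get_id()));
*/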
16011 
16012 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
16013 {
16014  if(count)
16015  {
16016  fprintf(m_File, "%p", pItems[0]);
16017  for(uint64_t i = 1; i < count; ++i)
16018  {
16019  fprintf(m_File, " %p", pItems[i]);
16020  }
16021  }
16022 }
16023 
16024 void VmaRecorder::Flush()
16025 {
16026  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
16027  {
16028  fflush(m_File);
16029  }
16030 }
16031 
16032 #endif // #if VMA_RECORDING_ENABLED
16033 
16034 ////////////////////////////////////////////////////////////////////////////////
16035 // VmaAllocationObjectAllocator
16036 
16037 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
16038  m_Allocator(pAllocationCallbacks, 1024)
16039 {
16040 }
16041 
16042 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
16043 {
16044  VmaMutexLock mutexLock(m_Mutex);
16045  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
16046 }
16047 
16048 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
16049 {
16050  VmaMutexLock mutexLock(m_Mutex);
16051  m_Allocator.Free(hAlloc);
16052 }
16053 
16054 ////////////////////////////////////////////////////////////////////////////////
16055 // VmaAllocator_T
16056 
16057 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
16058  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
16059  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
16060  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
16061  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
16062  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
16063  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
16064  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
16065  m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
16066  m_hDevice(pCreateInfo->device),
16067  m_hInstance(pCreateInfo->instance),
16068  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
16069  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
16070  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
16071  m_AllocationObjectAllocator(&m_AllocationCallbacks),
16072  m_HeapSizeLimitMask(0),
16073  m_DeviceMemoryCount(0),
16074  m_PreferredLargeHeapBlockSize(0),
16075  m_PhysicalDevice(pCreateInfo->physicalDevice),
16076  m_CurrentFrameIndex(0),
16077  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
16078  m_NextPoolId(0),
16079  m_GlobalMemoryTypeBits(UINT32_MAX)
16080 #if VMA_RECORDING_ENABLED
16081  ,m_pRecorder(VMA_NULL)
16082 #endif
16083 {
16084  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16085  {
16086  m_UseKhrDedicatedAllocation = false;
16087  m_UseKhrBindMemory2 = false;
16088  }
16089 
16090  if(VMA_DEBUG_DETECT_CORRUPTION)
16091  {
16092  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
16093  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
16094  }
16095 
16096  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
16097 
16098  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
16099  {
16100 #if !(VMA_DEDICATED_ALLOCATION)
16101  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
16102  {
16103  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
16104  }
16105 #endif
16106 #if !(VMA_BIND_MEMORY2)
16107  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
16108  {
16109  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
16110  }
16111 #endif
16112  }
16113 #if !(VMA_MEMORY_BUDGET)
16114  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
16115  {
16116  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
16117  }
16118 #endif
16119 #if !(VMA_BUFFER_DEVICE_ADDRESS)
16120  if(m_UseKhrBufferDeviceAddress)
16121  {
16122  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
16123  }
16124 #endif
16125 #if VMA_VULKAN_VERSION < 1002000
16126  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
16127  {
16128  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
16129  }
16130 #endif
16131 #if VMA_VULKAN_VERSION < 1001000
16132  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16133  {
16134  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
16135  }
16136 #endif
16137 #if !(VMA_MEMORY_PRIORITY)
16138  if(m_UseExtMemoryPriority)
16139  {
16140  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
16141  }
16142 #endif
16143 
16144  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
16145  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
16146  memset(&m_MemProps, 0, sizeof(m_MemProps));
16147 
16148  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
16149  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
16150 
16151 #if VMA_EXTERNAL_MEMORY
16152  memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
16153 #endif // #if VMA_EXTERNAL_MEMORY
16154 
16155  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
16156  {
16157  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
16158  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
16159  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
16160  }
16161 
16162  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
16163 
16164  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
16165  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
16166 
16167  VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
16168  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
16169  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
16170  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
16171 
16172  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
16173  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
16174 
16175  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
16176 
16177 #if VMA_EXTERNAL_MEMORY
16178  if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
16179  {
16180  memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
16181  sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
16182  }
16183 #endif // #if VMA_EXTERNAL_MEMORY
16184 
16185  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
16186  {
16187  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16188  {
16189  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
16190  if(limit != VK_WHOLE_SIZE)
16191  {
16192  m_HeapSizeLimitMask |= 1u << heapIndex;
16193  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
16194  {
16195  m_MemProps.memoryHeaps[heapIndex].size = limit;
16196  }
16197  }
16198  }
16199  }
16200 
16201  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16202  {
16203  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
16204 
16205  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
16206  this,
16207  VK_NULL_HANDLE, // hParentPool
16208  memTypeIndex,
16209  preferredBlockSize,
16210  0,
16211  SIZE_MAX,
16212  GetBufferImageGranularity(),
16213  pCreateInfo->frameInUseCount,
16214  false, // explicitBlockSize
16215  false, // linearAlgorithm
16216  0.5f, // priority (0.5 is the default per Vulkan spec)
16217  GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
16218  VMA_NULL); // pMemoryAllocateNext
16219  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
16220  // because minBlockCount is 0.
16221  }
16222 }
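/*
Editor's sketch (not original source): exercising the pHeapSizeLimit path
handled in the constructor above. VK_WHOLE_SIZE means "no limit"; here heap 0
is hypothetically capped at 1 GiB:

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;
    heapLimits[0] = 1024ull * 1024 * 1024;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pHeapSizeLimit = heapLimits;
*/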
16223 
16224 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
16225 {
16226  VkResult res = VK_SUCCESS;
16227 
16228  if(pCreateInfo->pRecordSettings != VMA_NULL &&
16229  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
16230  {
16231 #if VMA_RECORDING_ENABLED
16232  m_pRecorder = vma_new(this, VmaRecorder)();
16233  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
16234  if(res != VK_SUCCESS)
16235  {
16236  return res;
16237  }
16238  m_pRecorder->WriteConfiguration(
16239  m_PhysicalDeviceProperties,
16240  m_MemProps,
16241  m_VulkanApiVersion,
16242  m_UseKhrDedicatedAllocation,
16243  m_UseKhrBindMemory2,
16244  m_UseExtMemoryBudget,
16245  m_UseAmdDeviceCoherentMemory);
16246  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
16247 #else
16248  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
16249  return VK_ERROR_FEATURE_NOT_PRESENT;
16250 #endif
16251  }
16252 
16253 #if VMA_MEMORY_BUDGET
16254  if(m_UseExtMemoryBudget)
16255  {
16256  UpdateVulkanBudget();
16257  }
16258 #endif // #if VMA_MEMORY_BUDGET
16259 
16260  return res;
16261 }
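/*
Editor's sketch (illustrative): enabling the recording path handled by Init()
above, assuming the implementation was compiled with VMA_RECORDING_ENABLED
defined to 1. The file path is hypothetical:

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
    recordSettings.pFilePath = "vma_recording.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... physicalDevice, device, instance ...
    allocatorInfo.pRecordSettings = &recordSettings;
*/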
16262 
16263 VmaAllocator_T::~VmaAllocator_T()
16264 {
16265 #if VMA_RECORDING_ENABLED
16266  if(m_pRecorder != VMA_NULL)
16267  {
16268  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
16269  vma_delete(this, m_pRecorder);
16270  }
16271 #endif
16272 
16273  VMA_ASSERT(m_Pools.IsEmpty());
16274 
16275  for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
16276  {
16277  if(!m_DedicatedAllocations[memTypeIndex].IsEmpty())
16278  {
16279  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
16280  }
16281 
16282  vma_delete(this, m_pBlockVectors[memTypeIndex]);
16283  }
16284 }
16285 
16286 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
16287 {
16288 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16289  ImportVulkanFunctions_Static();
16290 #endif
16291 
16292  if(pVulkanFunctions != VMA_NULL)
16293  {
16294  ImportVulkanFunctions_Custom(pVulkanFunctions);
16295  }
16296 
16297 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16298  ImportVulkanFunctions_Dynamic();
16299 #endif
16300 
16301  ValidateVulkanFunctions();
16302 }
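/*
Editor's sketch (not original source): supplying a partial VmaVulkanFunctions
struct. Members left null here are filled in by the static/dynamic import paths
above; the hook functions are hypothetical user wrappers:

    VmaVulkanFunctions vkFuncs = {};
    vkFuncs.vkAllocateMemory = MyAllocateMemoryHook;
    vkFuncs.vkFreeMemory = MyFreeMemoryHook;

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vkFuncs;
*/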
16303 
16304 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16305 
16306 void VmaAllocator_T::ImportVulkanFunctions_Static()
16307 {
16308  // Vulkan 1.0
16309  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
16310  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
16311  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
16312  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
16313  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
16314  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
16315  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
16316  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
16317  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
16318  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
16319  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
16320  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
16321  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
16322  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
16323  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
16324  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
16325  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
16326 
16327  // Vulkan 1.1
16328 #if VMA_VULKAN_VERSION >= 1001000
16329  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16330  {
16331  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
16332  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
16333  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
16334  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
16335  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
16336  }
16337 #endif
16338 }
16339 
16340 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16341 
16342 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
16343 {
16344  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
16345 
16346 #define VMA_COPY_IF_NOT_NULL(funcName) \
16347  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
16348 
16349  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
16350  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
16351  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
16352  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
16353  VMA_COPY_IF_NOT_NULL(vkMapMemory);
16354  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
16355  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
16356  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
16357  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
16358  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
16359  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
16360  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
16361  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
16362  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
16363  VMA_COPY_IF_NOT_NULL(vkCreateImage);
16364  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
16365  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
16366 
16367 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16368  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
16369  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
16370 #endif
16371 
16372 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16373  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
16374  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
16375 #endif
16376 
16377 #if VMA_MEMORY_BUDGET
16378  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
16379 #endif
16380 
16381 #undef VMA_COPY_IF_NOT_NULL
16382 }
16383 
16384 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16385 
16386 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
16387 {
16388 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
16389  if(m_VulkanFunctions.memberName == VMA_NULL) \
16390  m_VulkanFunctions.memberName = \
16391  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
16392 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
16393  if(m_VulkanFunctions.memberName == VMA_NULL) \
16394  m_VulkanFunctions.memberName = \
16395  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
16396 
16397  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
16398  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
16399  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
16400  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
16401  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
16402  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
16403  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
16404  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
16405  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
16406  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
16407  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
16408  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
16409  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
16410  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
16411  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
16412  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
16413  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
16414 
16415 #if VMA_VULKAN_VERSION >= 1001000
16416  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16417  {
16418  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
16419  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
16420  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
16421  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
16422  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
16423  }
16424 #endif
16425 
16426 #if VMA_DEDICATED_ALLOCATION
16427  if(m_UseKhrDedicatedAllocation)
16428  {
16429  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
16430  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
16431  }
16432 #endif
16433 
16434 #if VMA_BIND_MEMORY2
16435  if(m_UseKhrBindMemory2)
16436  {
16437  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
16438  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
16439  }
16440 #endif // #if VMA_BIND_MEMORY2
16441 
16442 #if VMA_MEMORY_BUDGET
16443  if(m_UseExtMemoryBudget)
16444  {
16445  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
16446  }
16447 #endif // #if VMA_MEMORY_BUDGET
16448 
16449 #undef VMA_FETCH_DEVICE_FUNC
16450 #undef VMA_FETCH_INSTANCE_FUNC
16451 }
16452 
16453 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16454 
16455 void VmaAllocator_T::ValidateVulkanFunctions()
16456 {
16457  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
16458  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
16459  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
16460  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
16461  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
16462  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
16463  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
16464  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
16465  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
16466  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
16467  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
16468  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
16469  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
16470  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
16471  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
16472  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
16473  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
16474 
16475 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16476  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
16477  {
16478  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
16479  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
16480  }
16481 #endif
16482 
16483 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16484  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
16485  {
16486  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
16487  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
16488  }
16489 #endif
16490 
16491 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
16492  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16493  {
16494  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
16495  }
16496 #endif
16497 }
16498 
16499 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
16500 {
16501  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16502  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16503  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
16504  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
16505 }
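// Worked example (editor's addition): with the default VMA_SMALL_HEAP_MAX_SIZE
// of 1 GiB, a 256 MiB heap counts as small, so the preferred block size is
// 256 MiB / 8 = 32 MiB (already 32-byte aligned). A 12 GiB heap instead uses
// m_PreferredLargeHeapBlockSize, which defaults to
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB).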
16506 
16507 VkResult VmaAllocator_T::AllocateMemoryOfType(
16508  VkDeviceSize size,
16509  VkDeviceSize alignment,
16510  bool dedicatedAllocation,
16511  VkBuffer dedicatedBuffer,
16512  VkBufferUsageFlags dedicatedBufferUsage,
16513  VkImage dedicatedImage,
16514  const VmaAllocationCreateInfo& createInfo,
16515  uint32_t memTypeIndex,
16516  VmaSuballocationType suballocType,
16517  size_t allocationCount,
16518  VmaAllocation* pAllocations)
16519 {
16520  VMA_ASSERT(pAllocations != VMA_NULL);
16521  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
16522 
16523  VmaAllocationCreateInfo finalCreateInfo = createInfo;
16524 
16525  // If memory type is not HOST_VISIBLE, disable MAPPED.
16526  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16527  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16528  {
16529  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16530  }
16531  // If memory is lazily allocated, it should always be dedicated.
16532  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
16533  {
16534  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16535  }
16536 
16537  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
16538  VMA_ASSERT(blockVector);
16539 
16540  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
16541  bool preferDedicatedMemory =
16542  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
16543  dedicatedAllocation ||
16544  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
16545  size > preferredBlockSize / 2;
16546 
16547  if(preferDedicatedMemory &&
16548  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
16549  finalCreateInfo.pool == VK_NULL_HANDLE)
16550  {
16551  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16552  }
16553 
16554  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
16555  {
16556  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16557  {
16558  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16559  }
16560  else
16561  {
16562  return AllocateDedicatedMemory(
16563  size,
16564  suballocType,
16565  memTypeIndex,
16566  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16567  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16568  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16569  finalCreateInfo.pUserData,
16570  finalCreateInfo.priority,
16571  dedicatedBuffer,
16572  dedicatedBufferUsage,
16573  dedicatedImage,
16574  allocationCount,
16575  pAllocations);
16576  }
16577  }
16578  else
16579  {
16580  VkResult res = blockVector->Allocate(
16581  m_CurrentFrameIndex.load(),
16582  size,
16583  alignment,
16584  finalCreateInfo,
16585  suballocType,
16586  allocationCount,
16587  pAllocations);
16588  if(res == VK_SUCCESS)
16589  {
16590  return res;
16591  }
16592 
16593  // Try dedicated memory.
16594  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16595  {
16596  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16597  }
16598 
16599  // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
16600  // which can quickly deplete maxMemoryAllocationCount: Don't try dedicated allocations when above
16601  // 3/4 of the maximum allocation count.
16602  if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
16603  {
16604  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16605  }
16606 
16607  res = AllocateDedicatedMemory(
16608  size,
16609  suballocType,
16610  memTypeIndex,
16611  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16612  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16613  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16614  finalCreateInfo.pUserData,
16615  finalCreateInfo.priority,
16616  dedicatedBuffer,
16617  dedicatedBufferUsage,
16618  dedicatedImage,
16619  allocationCount,
16620  pAllocations);
16621  if(res == VK_SUCCESS)
16622  {
16623  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
16624  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
16625  return VK_SUCCESS;
16626  }
16627  else
16628  {
16629  // Everything failed: Return error code.
16630  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16631  return res;
16632  }
16633  }
16634 }
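/*
Editor's sketch (illustrative): the caller-side flags consumed by the decision
logic above. Forcing a dedicated VkDeviceMemory block:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

Combining that flag with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is
contradictory and is rejected with VK_ERROR_OUT_OF_DEVICE_MEMORY, as handled at
the top of this function.
*/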
16635 
16636 VkResult VmaAllocator_T::AllocateDedicatedMemory(
16637  VkDeviceSize size,
16638  VmaSuballocationType suballocType,
16639  uint32_t memTypeIndex,
16640  bool withinBudget,
16641  bool map,
16642  bool isUserDataString,
16643  void* pUserData,
16644  float priority,
16645  VkBuffer dedicatedBuffer,
16646  VkBufferUsageFlags dedicatedBufferUsage,
16647  VkImage dedicatedImage,
16648  size_t allocationCount,
16649  VmaAllocation* pAllocations)
16650 {
16651  VMA_ASSERT(allocationCount > 0 && pAllocations);
16652 
16653  if(withinBudget)
16654  {
16655  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16656  VmaBudget heapBudget = {};
16657  GetBudget(&heapBudget, heapIndex, 1);
16658  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16659  {
16660  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16661  }
16662  }
16663 
16664  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16665  allocInfo.memoryTypeIndex = memTypeIndex;
16666  allocInfo.allocationSize = size;
16667 
16668 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16669  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16670  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16671  {
16672  if(dedicatedBuffer != VK_NULL_HANDLE)
16673  {
16674  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16675  dedicatedAllocInfo.buffer = dedicatedBuffer;
16676  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16677  }
16678  else if(dedicatedImage != VK_NULL_HANDLE)
16679  {
16680  dedicatedAllocInfo.image = dedicatedImage;
16681  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16682  }
16683  }
16684 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16685 
16686 #if VMA_BUFFER_DEVICE_ADDRESS
16687  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16688  if(m_UseKhrBufferDeviceAddress)
16689  {
16690  bool canContainBufferWithDeviceAddress = true;
16691  if(dedicatedBuffer != VK_NULL_HANDLE)
16692  {
16693  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16694  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16695  }
16696  else if(dedicatedImage != VK_NULL_HANDLE)
16697  {
16698  canContainBufferWithDeviceAddress = false;
16699  }
16700  if(canContainBufferWithDeviceAddress)
16701  {
16702  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16703  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16704  }
16705  }
16706 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16707 
16708 #if VMA_MEMORY_PRIORITY
16709  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
16710  if(m_UseExtMemoryPriority)
16711  {
16712  priorityInfo.priority = priority;
16713  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
16714  }
16715 #endif // #if VMA_MEMORY_PRIORITY
16716 
16717 #if VMA_EXTERNAL_MEMORY
16718  // Attach VkExportMemoryAllocateInfoKHR if necessary.
16719  VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
16720  exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
16721  if(exportMemoryAllocInfo.handleTypes != 0)
16722  {
16723  VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
16724  }
16725 #endif // #if VMA_EXTERNAL_MEMORY
16726 
16727  size_t allocIndex;
16728  VkResult res = VK_SUCCESS;
16729  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16730  {
16731  res = AllocateDedicatedMemoryPage(
16732  size,
16733  suballocType,
16734  memTypeIndex,
16735  allocInfo,
16736  map,
16737  isUserDataString,
16738  pUserData,
16739  pAllocations + allocIndex);
16740  if(res != VK_SUCCESS)
16741  {
16742  break;
16743  }
16744  }
16745 
16746  if(res == VK_SUCCESS)
16747  {
16748  // Register them in m_DedicatedAllocations.
16749  {
16750  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16751  DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
16752  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16753  {
16754  dedicatedAllocations.PushBack(pAllocations[allocIndex]);
16755  }
16756  }
16757 
16758  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16759  }
16760  else
16761  {
16762  // Free all already created allocations.
16763  while(allocIndex--)
16764  {
16765  VmaAllocation currAlloc = pAllocations[allocIndex];
16766  VkDeviceMemory hMemory = currAlloc->GetMemory();
16767 
16768  /*
16769  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
16770  before vkFreeMemory.
16771 
16772  if(currAlloc->GetMappedData() != VMA_NULL)
16773  {
16774  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16775  }
16776  */
16777 
16778  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16779  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16780  currAlloc->SetUserData(this, VMA_NULL);
16781  m_AllocationObjectAllocator.Free(currAlloc);
16782  }
16783 
16784  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16785  }
16786 
16787  return res;
16788 }
16789 
16790 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16791  VkDeviceSize size,
16792  VmaSuballocationType suballocType,
16793  uint32_t memTypeIndex,
16794  const VkMemoryAllocateInfo& allocInfo,
16795  bool map,
16796  bool isUserDataString,
16797  void* pUserData,
16798  VmaAllocation* pAllocation)
16799 {
16800  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16801  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16802  if(res < 0)
16803  {
16804  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16805  return res;
16806  }
16807 
16808  void* pMappedData = VMA_NULL;
16809  if(map)
16810  {
16811  res = (*m_VulkanFunctions.vkMapMemory)(
16812  m_hDevice,
16813  hMemory,
16814  0,
16815  VK_WHOLE_SIZE,
16816  0,
16817  &pMappedData);
16818  if(res < 0)
16819  {
16820  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16821  FreeVulkanMemory(memTypeIndex, size, hMemory);
16822  return res;
16823  }
16824  }
16825 
16826  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16827  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16828  (*pAllocation)->SetUserData(this, pUserData);
16829  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16830  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16831  {
16832  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16833  }
16834 
16835  return VK_SUCCESS;
16836 }
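// Editor's note (illustrative): the `map` parameter corresponds to
// VMA_ALLOCATION_CREATE_MAPPED_BIT. The vkMapMemory() call above leaves the
// memory persistently mapped, so the application can read the pointer from
// VmaAllocationInfo::pMappedData instead of calling vmaMapMemory(), e.g.:
//
//     allocCreateInfo.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
//     // after creation: allocInfo.pMappedData stays valid for the allocation's lifetime.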
16837 
16838 void VmaAllocator_T::GetBufferMemoryRequirements(
16839  VkBuffer hBuffer,
16840  VkMemoryRequirements& memReq,
16841  bool& requiresDedicatedAllocation,
16842  bool& prefersDedicatedAllocation) const
16843 {
16844 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16845  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16846  {
16847  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16848  memReqInfo.buffer = hBuffer;
16849 
16850  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16851 
16852  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16853  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16854 
16855  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16856 
16857  memReq = memReq2.memoryRequirements;
16858  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16859  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16860  }
16861  else
16862 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16863  {
16864  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16865  requiresDedicatedAllocation = false;
16866  prefersDedicatedAllocation = false;
16867  }
16868 }
16869 
16870 void VmaAllocator_T::GetImageMemoryRequirements(
16871  VkImage hImage,
16872  VkMemoryRequirements& memReq,
16873  bool& requiresDedicatedAllocation,
16874  bool& prefersDedicatedAllocation) const
16875 {
16876 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16877  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16878  {
16879  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16880  memReqInfo.image = hImage;
16881 
16882  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16883 
16884  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16885  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16886 
16887  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16888 
16889  memReq = memReq2.memoryRequirements;
16890  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16891  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16892  }
16893  else
16894 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16895  {
16896  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16897  requiresDedicatedAllocation = false;
16898  prefersDedicatedAllocation = false;
16899  }
16900 }
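/*
Both getters above use the pNext-chain idiom from VK_KHR_get_memory_requirements2 and
VK_KHR_dedicated_allocation. The same query written against core Vulkan 1.1 entry points,
as a sketch assuming valid `device` and `image` handles:

    VkImageMemoryRequirementsInfo2 info = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 };
    info.image = image;

    VkMemoryDedicatedRequirements dedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS };
    VkMemoryRequirements2 memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 };
    memReq2.pNext = &dedicatedReq; // Chain the extra output structure.

    vkGetImageMemoryRequirements2(device, &info, &memReq2);
    // dedicatedReq.requiresDedicatedAllocation / prefersDedicatedAllocation now indicate
    // whether this image should receive its own VkDeviceMemory block.
*/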
16901 
16902 VkResult VmaAllocator_T::AllocateMemory(
16903  const VkMemoryRequirements& vkMemReq,
16904  bool requiresDedicatedAllocation,
16905  bool prefersDedicatedAllocation,
16906  VkBuffer dedicatedBuffer,
16907  VkBufferUsageFlags dedicatedBufferUsage,
16908  VkImage dedicatedImage,
16909  const VmaAllocationCreateInfo& createInfo,
16910  VmaSuballocationType suballocType,
16911  size_t allocationCount,
16912  VmaAllocation* pAllocations)
16913 {
16914  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16915 
16916  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16917 
16918  if(vkMemReq.size == 0)
16919  {
16920  return VK_ERROR_VALIDATION_FAILED_EXT;
16921  }
16922  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16923  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16924  {
16925  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16926  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16927  }
16928  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16929  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16930  {
16931  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16932  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16933  }
16934  if(requiresDedicatedAllocation)
16935  {
16936  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16937  {
16938  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16939  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16940  }
16941  if(createInfo.pool != VK_NULL_HANDLE)
16942  {
16943  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16944  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16945  }
16946  }
16947  if((createInfo.pool != VK_NULL_HANDLE) &&
16948  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16949  {
16950  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16951  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16952  }
16953 
16954  if(createInfo.pool != VK_NULL_HANDLE)
16955  {
16956  VmaAllocationCreateInfo createInfoForPool = createInfo;
16957  // If memory type is not HOST_VISIBLE, disable MAPPED.
16958  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16959  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16960  {
16961  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16962  }
16963 
16964  return createInfo.pool->m_BlockVector.Allocate(
16965  m_CurrentFrameIndex.load(),
16966  vkMemReq.size,
16967  vkMemReq.alignment,
16968  createInfoForPool,
16969  suballocType,
16970  allocationCount,
16971  pAllocations);
16972  }
16973  else
16974  {
16975  // Bit mask of Vulkan memory types acceptable for this allocation.
16976  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16977  uint32_t memTypeIndex = UINT32_MAX;
16978  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16979  if(res == VK_SUCCESS)
16980  {
16981  res = AllocateMemoryOfType(
16982  vkMemReq.size,
16983  vkMemReq.alignment,
16984  requiresDedicatedAllocation || prefersDedicatedAllocation,
16985  dedicatedBuffer,
16986  dedicatedBufferUsage,
16987  dedicatedImage,
16988  createInfo,
16989  memTypeIndex,
16990  suballocType,
16991  allocationCount,
16992  pAllocations);
16993  // Succeeded on first try.
16994  if(res == VK_SUCCESS)
16995  {
16996  return res;
16997  }
16998  // Allocation from this memory type failed. Try other compatible memory types.
16999  else
17000  {
17001  for(;;)
17002  {
17003  // Remove old memTypeIndex from list of possibilities.
17004  memoryTypeBits &= ~(1u << memTypeIndex);
17005  // Find alternative memTypeIndex.
17006  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
17007  if(res == VK_SUCCESS)
17008  {
17009  res = AllocateMemoryOfType(
17010  vkMemReq.size,
17011  vkMemReq.alignment,
17012  requiresDedicatedAllocation || prefersDedicatedAllocation,
17013  dedicatedBuffer,
17014  dedicatedBufferUsage,
17015  dedicatedImage,
17016  createInfo,
17017  memTypeIndex,
17018  suballocType,
17019  allocationCount,
17020  pAllocations);
17021  // Allocation from this alternative memory type succeeded.
17022  if(res == VK_SUCCESS)
17023  {
17024  return res;
17025  }
17026  // else: Allocation from this memory type failed. Try next one - next loop iteration.
17027  }
17028  // No other matching memory type index could be found.
17029  else
17030  {
17031  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
17032  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17033  }
17034  }
17035  }
17036  }
17037  // Can't find any single memory type matching the requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
17038  else
17039  return res;
17040  }
17041 }
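/*
The fallback loop above clears the failing type's bit and searches again, so every
compatible memory type is tried at most once. The core of that iteration in isolation,
as a sketch where TryAllocateFrom is a hypothetical stand-in for AllocateMemoryOfType:

    uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
    uint32_t memTypeIndex;
    while(vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &createInfo, &memTypeIndex) == VK_SUCCESS)
    {
        if(TryAllocateFrom(memTypeIndex) == VK_SUCCESS)
            break;
        memoryTypeBits &= ~(1u << memTypeIndex); // Never retry a type that already failed.
    }
*/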
17042 
17043 void VmaAllocator_T::FreeMemory(
17044  size_t allocationCount,
17045  const VmaAllocation* pAllocations)
17046 {
17047  VMA_ASSERT(pAllocations);
17048 
17049  for(size_t allocIndex = allocationCount; allocIndex--; )
17050  {
17051  VmaAllocation allocation = pAllocations[allocIndex];
17052 
17053  if(allocation != VK_NULL_HANDLE)
17054  {
17055  if(TouchAllocation(allocation))
17056  {
17057  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
17058  {
17059  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
17060  }
17061 
17062  switch(allocation->GetType())
17063  {
17064  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17065  {
17066  VmaBlockVector* pBlockVector = VMA_NULL;
17067  VmaPool hPool = allocation->GetBlock()->GetParentPool();
17068  if(hPool != VK_NULL_HANDLE)
17069  {
17070  pBlockVector = &hPool->m_BlockVector;
17071  }
17072  else
17073  {
17074  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17075  pBlockVector = m_pBlockVectors[memTypeIndex];
17076  }
17077  pBlockVector->Free(allocation);
17078  }
17079  break;
17080  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17081  FreeDedicatedMemory(allocation);
17082  break;
17083  default:
17084  VMA_ASSERT(0);
17085  }
17086  }
17087 
17088  // Do this regardless of whether the allocation is lost. Lost allocations still count toward m_Budget.m_AllocationBytes.
17089  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
17090  allocation->SetUserData(this, VMA_NULL);
17091  m_AllocationObjectAllocator.Free(allocation);
17092  }
17093  }
17094 }
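/*
This multi-allocation path is exposed publicly as vmaFreeMemoryPages. A sketch, assuming
`allocs` was filled by a matching vmaAllocateMemoryPages call:

    VmaAllocation allocs[3];
    // ... vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo, 3, allocs, NULL); ...
    vmaFreeMemoryPages(allocator, 3, allocs); // Frees in reverse order, as in the loop above.
*/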
17095 
17096 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
17097 {
17098  // Initialize.
17099  InitStatInfo(pStats->total);
17100  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
17101  InitStatInfo(pStats->memoryType[i]);
17102  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
17103  InitStatInfo(pStats->memoryHeap[i]);
17104 
17105  // Process default pools.
17106  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17107  {
17108  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17109  VMA_ASSERT(pBlockVector);
17110  pBlockVector->AddStats(pStats);
17111  }
17112 
17113  // Process custom pools.
17114  {
17115  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17116  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
17117  {
17118  pool->m_BlockVector.AddStats(pStats);
17119  }
17120  }
17121 
17122  // Process dedicated allocations.
17123  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17124  {
17125  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
17126  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17127  DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
17128  for(VmaAllocation alloc = dedicatedAllocList.Front();
17129  alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
17130  {
17131  VmaStatInfo allocationStatInfo;
17132  alloc->DedicatedAllocCalcStatsInfo(allocationStatInfo);
17133  VmaAddStatInfo(pStats->total, allocationStatInfo);
17134  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
17135  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
17136  }
17137  }
17138 
17139  // Postprocess.
17140  VmaPostprocessCalcStatInfo(pStats->total);
17141  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
17142  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
17143  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
17144  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
17145 }
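/*
From the application side these statistics are obtained with vmaCalculateStats. A sketch:

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu bytes in %u allocations\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount);
*/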
17146 
17147 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
17148 {
17149 #if VMA_MEMORY_BUDGET
17150  if(m_UseExtMemoryBudget)
17151  {
17152  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
17153  {
17154  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
17155  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
17156  {
17157  const uint32_t heapIndex = firstHeap + i;
17158 
17159  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
17160  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
17161 
17162  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
17163  {
17164  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
17165  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17166  }
17167  else
17168  {
17169  outBudget->usage = 0;
17170  }
17171 
17172  // Take the MIN with the heap size because an explicit HeapSizeLimit is already included in it.
17173  outBudget->budget = VMA_MIN(
17174  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
17175  }
17176  }
17177  else
17178  {
17179  UpdateVulkanBudget(); // Outside of mutex lock
17180  GetBudget(outBudget, firstHeap, heapCount); // Recursion
17181  }
17182  }
17183  else
17184 #endif
17185  {
17186  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
17187  {
17188  const uint32_t heapIndex = firstHeap + i;
17189 
17190  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
17191  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
17192 
17193  outBudget->usage = outBudget->blockBytes;
17194  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
17195  }
17196  }
17197 }
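/*
A typical client queries the budget before creating large resources. A sketch, assuming
`resourceSize` is known and heap 0 is the heap of interest:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets); // Fills one VmaBudget per memory heap.
    if(budgets[0].usage + resourceSize < budgets[0].budget)
    {
        // Allocating is unlikely to exceed the OS/driver budget for this heap.
    }
*/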
17198 
17199 static const uint32_t VMA_VENDOR_ID_AMD = 4098; // = 0x1002, AMD's PCI vendor ID.
17200 
17201 VkResult VmaAllocator_T::DefragmentationBegin(
17202  const VmaDefragmentationInfo2& info,
17203  VmaDefragmentationStats* pStats,
17204  VmaDefragmentationContext* pContext)
17205 {
17206  if(info.pAllocationsChanged != VMA_NULL)
17207  {
17208  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
17209  }
17210 
17211  *pContext = vma_new(this, VmaDefragmentationContext_T)(
17212  this, m_CurrentFrameIndex.load(), info.flags, pStats);
17213 
17214  (*pContext)->AddPools(info.poolCount, info.pPools);
17215  (*pContext)->AddAllocations(
17216  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
17217 
17218  VkResult res = (*pContext)->Defragment(
17219  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
17220  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
17221  info.commandBuffer, pStats, info.flags);
17222 
17223  if(res != VK_NOT_READY)
17224  {
17225  vma_delete(this, *pContext);
17226  *pContext = VMA_NULL;
17227  }
17228 
17229  return res;
17230 }
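/*
The begin/end pair is driven from the public API roughly as follows. A CPU-only sketch,
assuming `allocs`/`allocCount` identify allocations the application allows to be moved;
GPU defragmentation would additionally record into info.commandBuffer:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocs;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx); // In the CPU-only case res is not VK_NOT_READY.
*/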
17231 
17232 VkResult VmaAllocator_T::DefragmentationEnd(
17233  VmaDefragmentationContext context)
17234 {
17235  vma_delete(this, context);
17236  return VK_SUCCESS;
17237 }
17238 
17239 VkResult VmaAllocator_T::DefragmentationPassBegin(
17240  VmaDefragmentationPassInfo* pInfo,
17241  VmaDefragmentationContext context)
17242 {
17243  return context->DefragmentPassBegin(pInfo);
17244 }
17245 VkResult VmaAllocator_T::DefragmentationPassEnd(
17246  VmaDefragmentationContext context)
17247 {
17248  return context->DefragmentPassEnd();
17249 
17250 }
17251 
17252 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
17253 {
17254  if(hAllocation->CanBecomeLost())
17255  {
17256  /*
17257  Warning: This is a carefully designed algorithm.
17258  Do not modify unless you really know what you're doing :)
17259  */
17260  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17261  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17262  for(;;)
17263  {
17264  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
17265  {
17266  pAllocationInfo->memoryType = UINT32_MAX;
17267  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
17268  pAllocationInfo->offset = 0;
17269  pAllocationInfo->size = hAllocation->GetSize();
17270  pAllocationInfo->pMappedData = VMA_NULL;
17271  pAllocationInfo->pUserData = hAllocation->GetUserData();
17272  return;
17273  }
17274  else if(localLastUseFrameIndex == localCurrFrameIndex)
17275  {
17276  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
17277  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
17278  pAllocationInfo->offset = hAllocation->GetOffset();
17279  pAllocationInfo->size = hAllocation->GetSize();
17280  pAllocationInfo->pMappedData = VMA_NULL;
17281  pAllocationInfo->pUserData = hAllocation->GetUserData();
17282  return;
17283  }
17284  else // Last use time earlier than current time.
17285  {
17286  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17287  {
17288  localLastUseFrameIndex = localCurrFrameIndex;
17289  }
17290  }
17291  }
17292  }
17293  else
17294  {
17295 #if VMA_STATS_STRING_ENABLED
17296  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17297  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17298  for(;;)
17299  {
17300  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17301  if(localLastUseFrameIndex == localCurrFrameIndex)
17302  {
17303  break;
17304  }
17305  else // Last use time earlier than current time.
17306  {
17307  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17308  {
17309  localLastUseFrameIndex = localCurrFrameIndex;
17310  }
17311  }
17312  }
17313 #endif
17314 
17315  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
17316  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
17317  pAllocationInfo->offset = hAllocation->GetOffset();
17318  pAllocationInfo->size = hAllocation->GetSize();
17319  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
17320  pAllocationInfo->pUserData = hAllocation->GetUserData();
17321  }
17322 }
17323 
17324 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
17325 {
17326  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
17327  if(hAllocation->CanBecomeLost())
17328  {
17329  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17330  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17331  for(;;)
17332  {
17333  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
17334  {
17335  return false;
17336  }
17337  else if(localLastUseFrameIndex == localCurrFrameIndex)
17338  {
17339  return true;
17340  }
17341  else // Last use time earlier than current time.
17342  {
17343  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17344  {
17345  localLastUseFrameIndex = localCurrFrameIndex;
17346  }
17347  }
17348  }
17349  }
17350  else
17351  {
17352 #if VMA_STATS_STRING_ENABLED
17353  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17354  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17355  for(;;)
17356  {
17357  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17358  if(localLastUseFrameIndex == localCurrFrameIndex)
17359  {
17360  break;
17361  }
17362  else // Last use time earlier than current time.
17363  {
17364  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17365  {
17366  localLastUseFrameIndex = localCurrFrameIndex;
17367  }
17368  }
17369  }
17370 #endif
17371 
17372  return true;
17373  }
17374 }
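/*
Both functions implement the "lost allocations" protocol: an allocation created with
VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT must be touched in every frame it is used.
A per-frame sketch, with `frameIndex` maintained by the application:

    vmaSetCurrentFrameIndex(allocator, frameIndex);
    if(vmaTouchAllocation(allocator, alloc))
    {
        // Still alive; safe to use this frame.
    }
    else
    {
        // Lost; the resource must be recreated.
    }
*/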
17375 
17376 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
17377 {
17378  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
17379 
17380  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
17381 
17382  // Protection against an uninitialized new structure member. If garbage data were left there, dereferencing this pointer would crash.
17383  if(pCreateInfo->pMemoryAllocateNext)
17384  {
17385  VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
17386  }
17387 
17388  if(newCreateInfo.maxBlockCount == 0)
17389  {
17390  newCreateInfo.maxBlockCount = SIZE_MAX;
17391  }
17392  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
17393  {
17394  return VK_ERROR_INITIALIZATION_FAILED;
17395  }
17396  // Memory type index out of range or forbidden.
17397  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
17398  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
17399  {
17400  return VK_ERROR_FEATURE_NOT_PRESENT;
17401  }
17402  if(newCreateInfo.minAllocationAlignment > 0)
17403  {
17404  VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
17405  }
17406 
17407  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
17408 
17409  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
17410 
17411  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
17412  if(res != VK_SUCCESS)
17413  {
17414  vma_delete(this, *pPool);
17415  *pPool = VMA_NULL;
17416  return res;
17417  }
17418 
17419  // Add to m_Pools.
17420  {
17421  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17422  (*pPool)->SetId(m_NextPoolId++);
17423  m_Pools.PushBack(*pPool);
17424  }
17425 
17426  return VK_SUCCESS;
17427 }
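/*
Creating a custom pool from the application side (a sketch; memTypeIndex would normally
come from vmaFindMemoryTypeIndex or one of its ForBufferInfo/ForImageInfo variants):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 0 would mean the preferred default.
    poolCreateInfo.minBlockCount = 1;               // Pre-allocated by CreateMinBlocks() above.
    poolCreateInfo.maxBlockCount = 8;               // 0 would mean SIZE_MAX, as above.

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ... allocate via VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/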
17428 
17429 void VmaAllocator_T::DestroyPool(VmaPool pool)
17430 {
17431  // Remove from m_Pools.
17432  {
17433  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17434  m_Pools.Remove(pool);
17435  }
17436 
17437  vma_delete(this, pool);
17438 }
17439 
17440 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
17441 {
17442  pool->m_BlockVector.GetPoolStats(pPoolStats);
17443 }
17444 
17445 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
17446 {
17447  m_CurrentFrameIndex.store(frameIndex);
17448 
17449 #if VMA_MEMORY_BUDGET
17450  if(m_UseExtMemoryBudget)
17451  {
17452  UpdateVulkanBudget();
17453  }
17454 #endif // #if VMA_MEMORY_BUDGET
17455 }
17456 
17457 void VmaAllocator_T::MakePoolAllocationsLost(
17458  VmaPool hPool,
17459  size_t* pLostAllocationCount)
17460 {
17461  hPool->m_BlockVector.MakePoolAllocationsLost(
17462  m_CurrentFrameIndex.load(),
17463  pLostAllocationCount);
17464 }
17465 
17466 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
17467 {
17468  return hPool->m_BlockVector.CheckCorruption();
17469 }
17470 
17471 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
17472 {
17473  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
17474 
17475  // Process default pools.
17476  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17477  {
17478  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
17479  {
17480  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17481  VMA_ASSERT(pBlockVector);
17482  VkResult localRes = pBlockVector->CheckCorruption();
17483  switch(localRes)
17484  {
17485  case VK_ERROR_FEATURE_NOT_PRESENT:
17486  break;
17487  case VK_SUCCESS:
17488  finalRes = VK_SUCCESS;
17489  break;
17490  default:
17491  return localRes;
17492  }
17493  }
17494  }
17495 
17496  // Process custom pools.
17497  {
17498  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17499  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
17500  {
17501  if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
17502  {
17503  VkResult localRes = pool->m_BlockVector.CheckCorruption();
17504  switch(localRes)
17505  {
17506  case VK_ERROR_FEATURE_NOT_PRESENT:
17507  break;
17508  case VK_SUCCESS:
17509  finalRes = VK_SUCCESS;
17510  break;
17511  default:
17512  return localRes;
17513  }
17514  }
17515  }
17516  }
17517 
17518  return finalRes;
17519 }
17520 
17521 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
17522 {
17523  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
17524  (*pAllocation)->InitLost();
17525 }
17526 
17527 // An RAII helper that increments a given atomic counter and decrements it back in its destructor, unless Commit() is called.
17528 template<typename T>
17529 struct AtomicTransactionalIncrement
17530 {
17531 public:
17532  typedef std::atomic<T> AtomicT;
17533  ~AtomicTransactionalIncrement()
17534  {
17535  if(m_Atomic)
17536  --(*m_Atomic);
17537  }
17538  T Increment(AtomicT* atomic)
17539  {
17540  m_Atomic = atomic;
17541  return m_Atomic->fetch_add(1);
17542  }
17543  void Commit()
17544  {
17545  m_Atomic = nullptr;
17546  }
17547 
17548 private:
17549  AtomicT* m_Atomic = nullptr;
17550 };
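/*
Usage pattern of this RAII helper, as in AllocateVulkanMemory below: increment first,
then Commit() only on success, so the destructor rolls the counter back on every error
path. A sketch, where DoWork is a hypothetical fallible operation:

    std::atomic<uint32_t> counter{0};
    {
        AtomicTransactionalIncrement<uint32_t> guard;
        guard.Increment(&counter); // counter is now 1.
        if(!DoWork())
            return;                // Destructor decrements counter back to 0.
        guard.Commit();            // Keep the increment permanently.
    }
*/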
17551 
17552 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
17553 {
17554  AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
17555  const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
17556 #if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
17557  if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
17558  {
17559  return VK_ERROR_TOO_MANY_OBJECTS;
17560  }
17561 #endif
17562 
17563  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
17564 
17565  // HeapSizeLimit is in effect for this heap.
17566  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
17567  {
17568  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
17569  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
17570  for(;;)
17571  {
17572  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
17573  if(blockBytesAfterAllocation > heapSize)
17574  {
17575  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17576  }
17577  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
17578  {
17579  break;
17580  }
17581  }
17582  }
17583  else
17584  {
17585  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
17586  }
17587 
17588  // VULKAN CALL vkAllocateMemory.
17589  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
17590 
17591  if(res == VK_SUCCESS)
17592  {
17593 #if VMA_MEMORY_BUDGET
17594  ++m_Budget.m_OperationsSinceBudgetFetch;
17595 #endif
17596 
17597  // Informative callback.
17598  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
17599  {
17600  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
17601  }
17602 
17603  deviceMemoryCountIncrement.Commit();
17604  }
17605  else
17606  {
17607  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
17608  }
17609 
17610  return res;
17611 }
17612 
17613 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
17614 {
17615  // Informative callback.
17616  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
17617  {
17618  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
17619  }
17620 
17621  // VULKAN CALL vkFreeMemory.
17622  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
17623 
17624  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
17625 
17626  --m_DeviceMemoryCount;
17627 }
17628 
17629 VkResult VmaAllocator_T::BindVulkanBuffer(
17630  VkDeviceMemory memory,
17631  VkDeviceSize memoryOffset,
17632  VkBuffer buffer,
17633  const void* pNext)
17634 {
17635  if(pNext != VMA_NULL)
17636  {
17637 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17638  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17639  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
17640  {
17641  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
17642  bindBufferMemoryInfo.pNext = pNext;
17643  bindBufferMemoryInfo.buffer = buffer;
17644  bindBufferMemoryInfo.memory = memory;
17645  bindBufferMemoryInfo.memoryOffset = memoryOffset;
17646  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
17647  }
17648  else
17649 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17650  {
17651  return VK_ERROR_EXTENSION_NOT_PRESENT;
17652  }
17653  }
17654  else
17655  {
17656  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
17657  }
17658 }
17659 
17660 VkResult VmaAllocator_T::BindVulkanImage(
17661  VkDeviceMemory memory,
17662  VkDeviceSize memoryOffset,
17663  VkImage image,
17664  const void* pNext)
17665 {
17666  if(pNext != VMA_NULL)
17667  {
17668 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17669  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17670  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
17671  {
17672  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
17673  bindImageMemoryInfo.pNext = pNext;
17674  bindImageMemoryInfo.image = image;
17675  bindImageMemoryInfo.memory = memory;
17676  bindImageMemoryInfo.memoryOffset = memoryOffset;
17677  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
17678  }
17679  else
17680 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17681  {
17682  return VK_ERROR_EXTENSION_NOT_PRESENT;
17683  }
17684  }
17685  else
17686  {
17687  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17688  }
17689 }
17690 
17691 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17692 {
17693  if(hAllocation->CanBecomeLost())
17694  {
17695  return VK_ERROR_MEMORY_MAP_FAILED;
17696  }
17697 
17698  switch(hAllocation->GetType())
17699  {
17700  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17701  {
17702  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17703  char *pBytes = VMA_NULL;
17704  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17705  if(res == VK_SUCCESS)
17706  {
17707  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17708  hAllocation->BlockAllocMap();
17709  }
17710  return res;
17711  }
17712  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17713  return hAllocation->DedicatedAllocMap(this, ppData);
17714  default:
17715  VMA_ASSERT(0);
17716  return VK_ERROR_MEMORY_MAP_FAILED;
17717  }
17718 }
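/*
The public wrappers vmaMapMemory/vmaUnmapMemory route here. For block allocations the
mapping is reference-counted per VkDeviceMemory block, so mapping several suballocations
of the same block concurrently is safe. A sketch:

    void* pData;
    if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize); // srcData/srcSize are assumed here.
        vmaUnmapMemory(allocator, alloc);
    }
*/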
17719 
17720 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17721 {
17722  switch(hAllocation->GetType())
17723  {
17724  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17725  {
17726  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17727  hAllocation->BlockAllocUnmap();
17728  pBlock->Unmap(this, 1);
17729  }
17730  break;
17731  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17732  hAllocation->DedicatedAllocUnmap(this);
17733  break;
17734  default:
17735  VMA_ASSERT(0);
17736  }
17737 }
17738 
17739 VkResult VmaAllocator_T::BindBufferMemory(
17740  VmaAllocation hAllocation,
17741  VkDeviceSize allocationLocalOffset,
17742  VkBuffer hBuffer,
17743  const void* pNext)
17744 {
17745  VkResult res = VK_SUCCESS;
17746  switch(hAllocation->GetType())
17747  {
17748  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17749  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17750  break;
17751  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17752  {
17753  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17754  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17755  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17756  break;
17757  }
17758  default:
17759  VMA_ASSERT(0);
17760  }
17761  return res;
17762 }
17763 
17764 VkResult VmaAllocator_T::BindImageMemory(
17765  VmaAllocation hAllocation,
17766  VkDeviceSize allocationLocalOffset,
17767  VkImage hImage,
17768  const void* pNext)
17769 {
17770  VkResult res = VK_SUCCESS;
17771  switch(hAllocation->GetType())
17772  {
17773  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17774  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17775  break;
17776  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17777  {
17778  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17779  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17780  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17781  break;
17782  }
17783  default:
17784  VMA_ASSERT(0);
17785  }
17786  return res;
17787 }
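/*
The pNext parameter threaded through these Bind* functions is exposed publicly as
vmaBindBufferMemory2/vmaBindImageMemory2. A hypothetical use with
VkBindBufferMemoryDeviceGroupInfo from VK_KHR_device_group (a sketch; deviceIndexCount
and pDeviceIndices are assumed):

    VkBindBufferMemoryDeviceGroupInfo deviceGroupInfo =
        { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO };
    deviceGroupInfo.deviceIndexCount = deviceIndexCount;
    deviceGroupInfo.pDeviceIndices = pDeviceIndices;
    vmaBindBufferMemory2(allocator, alloc, 0, buf, &deviceGroupInfo); // Offset 0 within alloc.
*/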
17788 
17789 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17790  VmaAllocation hAllocation,
17791  VkDeviceSize offset, VkDeviceSize size,
17792  VMA_CACHE_OPERATION op)
17793 {
17794  VkResult res = VK_SUCCESS;
17795 
17796  VkMappedMemoryRange memRange = {};
17797  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17798  {
17799  switch(op)
17800  {
17801  case VMA_CACHE_FLUSH:
17802  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17803  break;
17804  case VMA_CACHE_INVALIDATE:
17805  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17806  break;
17807  default:
17808  VMA_ASSERT(0);
17809  }
17810  }
17811  // else: Just ignore this call.
17812  return res;
17813 }
17814 
17815 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17816  uint32_t allocationCount,
17817  const VmaAllocation* allocations,
17818  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17819  VMA_CACHE_OPERATION op)
17820 {
17821  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17822  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17823  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17824 
17825  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17826  {
17827  const VmaAllocation alloc = allocations[allocIndex];
17828  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17829  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17830  VkMappedMemoryRange newRange;
17831  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17832  {
17833  ranges.push_back(newRange);
17834  }
17835  }
17836 
17837  VkResult res = VK_SUCCESS;
17838  if(!ranges.empty())
17839  {
17840  switch(op)
17841  {
17842  case VMA_CACHE_FLUSH:
17843  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17844  break;
17845  case VMA_CACHE_INVALIDATE:
17846  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17847  break;
17848  default:
17849  VMA_ASSERT(0);
17850  }
17851  }
17852  // else: Just ignore this call.
17853  return res;
17854 }
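/*
For memory types without HOST_COHERENT, writes through a mapped pointer must be flushed
explicitly. The public single-allocation entry point is vmaFlushAllocation; a sketch:

    void* pData;
    vmaMapMemory(allocator, alloc, &pData);
    memcpy(pData, srcData, srcSize);                        // srcData/srcSize are assumed here.
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // No-op on HOST_COHERENT memory.
    vmaUnmapMemory(allocator, alloc);
*/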
17855 
17856 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17857 {
17858  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17859 
17860  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17861  {
17862  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17863  DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
17864  dedicatedAllocations.Remove(allocation);
17865  }
17866 
17867  VkDeviceMemory hMemory = allocation->GetMemory();
17868 
17869  /*
17870  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
17871  before vkFreeMemory.
17872 
17873  if(allocation->GetMappedData() != VMA_NULL)
17874  {
17875  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17876  }
17877  */
17878 
17879  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17880 
17881  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17882 }
17883 
17884 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17885 {
17886  VkBufferCreateInfo dummyBufCreateInfo;
17887  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17888 
17889  uint32_t memoryTypeBits = 0;
17890 
17891  // Create buffer.
17892  VkBuffer buf = VK_NULL_HANDLE;
17893  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17894  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17895  if(res == VK_SUCCESS)
17896  {
17897  // Query for supported memory types.
17898  VkMemoryRequirements memReq;
17899  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17900  memoryTypeBits = memReq.memoryTypeBits;
17901 
17902  // Destroy buffer.
17903  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17904  }
17905 
17906  return memoryTypeBits;
17907 }
17908 
17909 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17910 {
17911  // Make sure memory information is already fetched.
17912  VMA_ASSERT(GetMemoryTypeCount() > 0);
17913 
17914  uint32_t memoryTypeBits = UINT32_MAX;
17915 
17916  if(!m_UseAmdDeviceCoherentMemory)
17917  {
17918  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17919  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17920  {
17921  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17922  {
17923  memoryTypeBits &= ~(1u << memTypeIndex);
17924  }
17925  }
17926  }
17927 
17928  return memoryTypeBits;
17929 }
17930 
17931 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17932  VmaAllocation allocation,
17933  VkDeviceSize offset, VkDeviceSize size,
17934  VkMappedMemoryRange& outRange) const
17935 {
17936  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17937  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17938  {
17939  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17940  const VkDeviceSize allocationSize = allocation->GetSize();
17941  VMA_ASSERT(offset <= allocationSize);
17942 
17943  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17944  outRange.pNext = VMA_NULL;
17945  outRange.memory = allocation->GetMemory();
17946 
17947  switch(allocation->GetType())
17948  {
17949  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17950  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17951  if(size == VK_WHOLE_SIZE)
17952  {
17953  outRange.size = allocationSize - outRange.offset;
17954  }
17955  else
17956  {
17957  VMA_ASSERT(offset + size <= allocationSize);
17958  outRange.size = VMA_MIN(
17959  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17960  allocationSize - outRange.offset);
17961  }
17962  break;
17963  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17964  {
17965  // 1. Still within this allocation.
17966  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17967  if(size == VK_WHOLE_SIZE)
17968  {
17969  size = allocationSize - offset;
17970  }
17971  else
17972  {
17973  VMA_ASSERT(offset + size <= allocationSize);
17974  }
17975  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17976 
17977  // 2. Adjust to whole block.
17978  const VkDeviceSize allocationOffset = allocation->GetOffset();
17979  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17980  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17981  outRange.offset += allocationOffset;
17982  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17983 
17984  break;
17985  }
17986  default:
17987  VMA_ASSERT(0);
17988  }
17989  return true;
17990  }
17991  return false;
17992 }
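/*
The rounding above expands [offset, offset + size) to nonCoherentAtomSize boundaries, as
the Vulkan spec requires for VkMappedMemoryRange. Worked example with
nonCoherentAtomSize = 64: a request of offset = 100, size = 50 yields
outRange.offset = VmaAlignDown(100, 64) = 64 and
outRange.size = VmaAlignUp(50 + (100 - 64), 64) = VmaAlignUp(86, 64) = 128,
subsequently clamped so the range never extends past the allocation or its block.
*/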
17993 
17994 #if VMA_MEMORY_BUDGET
17995 
17996 void VmaAllocator_T::UpdateVulkanBudget()
17997 {
17998  VMA_ASSERT(m_UseExtMemoryBudget);
17999 
18000  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
18001 
18002  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
18003  VmaPnextChainPushFront(&memProps, &budgetProps);
18004 
18005  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
18006 
18007  {
18008  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
18009 
18010  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
18011  {
18012  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
18013  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
18014  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
18015 
18016  // Some buggy drivers return the budget incorrectly, e.g. 0 or much larger than the heap size.
18017  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
18018  {
18019  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
18020  }
18021  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
18022  {
18023  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
18024  }
18025  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
18026  {
18027  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
18028  }
18029  }
18030  m_Budget.m_OperationsSinceBudgetFetch = 0;
18031  }
18032 }
18033 
18034 #endif // #if VMA_MEMORY_BUDGET
18035 
18036 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
18037 {
18038  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
18039  !hAllocation->CanBecomeLost() &&
18040  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
18041  {
18042  void* pData = VMA_NULL;
18043  VkResult res = Map(hAllocation, &pData);
18044  if(res == VK_SUCCESS)
18045  {
18046  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
18047  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
18048  Unmap(hAllocation);
18049  }
18050  else
18051  {
18052  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
18053  }
18054  }
18055 }
18056 
18057 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
18058 {
18059  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
18060  if(memoryTypeBits == UINT32_MAX)
18061  {
18062  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
18063  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
18064  }
18065  return memoryTypeBits;
18066 }
18067 
18068 #if VMA_STATS_STRING_ENABLED
18069 
18070 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
18071 {
18072  bool dedicatedAllocationsStarted = false;
18073  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
18074  {
18075  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
18076  DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
18077  if(!dedicatedAllocList.IsEmpty())
18078  {
18079  if(dedicatedAllocationsStarted == false)
18080  {
18081  dedicatedAllocationsStarted = true;
18082  json.WriteString("DedicatedAllocations");
18083  json.BeginObject();
18084  }
18085 
18086  json.BeginString("Type ");
18087  json.ContinueString(memTypeIndex);
18088  json.EndString();
18089 
18090  json.BeginArray();
18091 
18092  for(VmaAllocation alloc = dedicatedAllocList.Front();
18093  alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
18094  {
18095  json.BeginObject(true);
18096  alloc->PrintParameters(json);
18097  json.EndObject();
18098  }
18099 
18100  json.EndArray();
18101  }
18102  }
18103  if(dedicatedAllocationsStarted)
18104  {
18105  json.EndObject();
18106  }
18107 
18108  {
18109  bool allocationsStarted = false;
18110  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
18111  {
18112  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
18113  {
18114  if(allocationsStarted == false)
18115  {
18116  allocationsStarted = true;
18117  json.WriteString("DefaultPools");
18118  json.BeginObject();
18119  }
18120 
18121  json.BeginString("Type ");
18122  json.ContinueString(memTypeIndex);
18123  json.EndString();
18124 
18125  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
18126  }
18127  }
18128  if(allocationsStarted)
18129  {
18130  json.EndObject();
18131  }
18132  }
18133 
18134  // Custom pools
18135  {
18136  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
18137  if(!m_Pools.IsEmpty())
18138  {
18139  json.WriteString("Pools");
18140  json.BeginObject();
18141  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
18142  {
18143  json.BeginString();
18144  json.ContinueString(pool->GetId());
18145  json.EndString();
18146 
18147  pool->m_BlockVector.PrintDetailedMap(json);
18148  }
18149  json.EndObject();
18150  }
18151  }
18152 }
18153 
18154 #endif // #if VMA_STATS_STRING_ENABLED
18155 
18156 ////////////////////////////////////////////////////////////////////////////////
18157 // Public interface
18158 
18159 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
18160  const VmaAllocatorCreateInfo* pCreateInfo,
18161  VmaAllocator* pAllocator)
18162 {
18163  VMA_ASSERT(pCreateInfo && pAllocator);
18164  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
18165  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
18166  VMA_DEBUG_LOG("vmaCreateAllocator");
18167  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
18168  return (*pAllocator)->Init(pCreateInfo);
18169 }
18170 
18171 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
18172  VmaAllocator allocator)
18173 {
18174  if(allocator != VK_NULL_HANDLE)
18175  {
18176  VMA_DEBUG_LOG("vmaDestroyAllocator");
18177  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
18178  vma_delete(&allocationCallbacks, allocator);
18179  }
18180 }
18181 
18182 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
18183 {
18184  VMA_ASSERT(allocator && pAllocatorInfo);
18185  pAllocatorInfo->instance = allocator->m_hInstance;
18186  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
18187  pAllocatorInfo->device = allocator->m_hDevice;
18188 }
18189 
18190 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
18191  VmaAllocator allocator,
18192  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
18193 {
18194  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
18195  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
18196 }
18197 
18198 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
18199  VmaAllocator allocator,
18200  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
18201 {
18202  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
18203  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
18204 }
18205 
18206 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
18207  VmaAllocator allocator,
18208  uint32_t memoryTypeIndex,
18209  VkMemoryPropertyFlags* pFlags)
18210 {
18211  VMA_ASSERT(allocator && pFlags);
18212  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
18213  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
18214 }
18215 
18216 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
18217  VmaAllocator allocator,
18218  uint32_t frameIndex)
18219 {
18220  VMA_ASSERT(allocator);
18221  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
18222 
18223  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18224 
18225  allocator->SetCurrentFrameIndex(frameIndex);
18226 }
18227 
18228 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
18229  VmaAllocator allocator,
18230  VmaStats* pStats)
18231 {
18232  VMA_ASSERT(allocator && pStats);
18233  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18234  allocator->CalculateStats(pStats);
18235 }
18236 
18237 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
18238  VmaAllocator allocator,
18239  VmaBudget* pBudget)
18240 {
18241  VMA_ASSERT(allocator && pBudget);
18242  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18243  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
18244 }
18245 
18246 #if VMA_STATS_STRING_ENABLED
18247 
18248 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
18249  VmaAllocator allocator,
18250  char** ppStatsString,
18251  VkBool32 detailedMap)
18252 {
18253  VMA_ASSERT(allocator && ppStatsString);
18254  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18255 
18256  VmaStringBuilder sb(allocator);
18257  {
18258  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
18259  json.BeginObject();
18260 
18261  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
18262  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
18263 
18264  VmaStats stats;
18265  allocator->CalculateStats(&stats);
18266 
18267  json.WriteString("Total");
18268  VmaPrintStatInfo(json, stats.total);
18269 
18270  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
18271  {
18272  json.BeginString("Heap ");
18273  json.ContinueString(heapIndex);
18274  json.EndString();
18275  json.BeginObject();
18276 
18277  json.WriteString("Size");
18278  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
18279 
18280  json.WriteString("Flags");
18281  json.BeginArray(true);
18282  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
18283  {
18284  json.WriteString("DEVICE_LOCAL");
18285  }
18286  json.EndArray();
18287 
18288  json.WriteString("Budget");
18289  json.BeginObject();
18290  {
18291  json.WriteString("BlockBytes");
18292  json.WriteNumber(budget[heapIndex].blockBytes);
18293  json.WriteString("AllocationBytes");
18294  json.WriteNumber(budget[heapIndex].allocationBytes);
18295  json.WriteString("Usage");
18296  json.WriteNumber(budget[heapIndex].usage);
18297  json.WriteString("Budget");
18298  json.WriteNumber(budget[heapIndex].budget);
18299  }
18300  json.EndObject();
18301 
18302  if(stats.memoryHeap[heapIndex].blockCount > 0)
18303  {
18304  json.WriteString("Stats");
18305  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
18306  }
18307 
18308  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
18309  {
18310  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
18311  {
18312  json.BeginString("Type ");
18313  json.ContinueString(typeIndex);
18314  json.EndString();
18315 
18316  json.BeginObject();
18317 
18318  json.WriteString("Flags");
18319  json.BeginArray(true);
18320  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
18321  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
18322  {
18323  json.WriteString("DEVICE_LOCAL");
18324  }
18325  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
18326  {
18327  json.WriteString("HOST_VISIBLE");
18328  }
18329  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
18330  {
18331  json.WriteString("HOST_COHERENT");
18332  }
18333  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
18334  {
18335  json.WriteString("HOST_CACHED");
18336  }
18337  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
18338  {
18339  json.WriteString("LAZILY_ALLOCATED");
18340  }
18341 #if VMA_VULKAN_VERSION >= 1001000
18342  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
18343  {
18344  json.WriteString("PROTECTED");
18345  }
18346 #endif // #if VMA_VULKAN_VERSION >= 1001000
18347 #if VK_AMD_device_coherent_memory
18348  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
18349  {
18350  json.WriteString("DEVICE_COHERENT");
18351  }
18352  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
18353  {
18354  json.WriteString("DEVICE_UNCACHED");
18355  }
18356 #endif // #if VK_AMD_device_coherent_memory
18357  json.EndArray();
18358 
18359  if(stats.memoryType[typeIndex].blockCount > 0)
18360  {
18361  json.WriteString("Stats");
18362  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
18363  }
18364 
18365  json.EndObject();
18366  }
18367  }
18368 
18369  json.EndObject();
18370  }
18371  if(detailedMap == VK_TRUE)
18372  {
18373  allocator->PrintDetailedMap(json);
18374  }
18375 
18376  json.EndObject();
18377  }
18378 
18379  const size_t len = sb.GetLength();
18380  char* const pChars = vma_new_array(allocator, char, len + 1);
18381  if(len > 0)
18382  {
18383  memcpy(pChars, sb.GetData(), len);
18384  }
18385  pChars[len] = '\0';
18386  *ppStatsString = pChars;
18387 }
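/*
The string built here must be released with vmaFreeStatsString below. A sketch:

    char* statsString;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE adds the detailed map.
    fprintf(stderr, "%s\n", statsString);                  // Or dump to a .json file.
    vmaFreeStatsString(allocator, statsString);
*/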
18388 
18389 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
18390  VmaAllocator allocator,
18391  char* pStatsString)
18392 {
18393  if(pStatsString != VMA_NULL)
18394  {
18395  VMA_ASSERT(allocator);
18396  size_t len = strlen(pStatsString);
18397  vma_delete_array(allocator, pStatsString, len + 1);
18398  }
18399 }
18400 
18401 #endif // #if VMA_STATS_STRING_ENABLED
18402 
18403 /*
18404 This function is not protected by any mutex because it just reads immutable data.
18405 */
18406 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
18407  VmaAllocator allocator,
18408  uint32_t memoryTypeBits,
18409  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18410  uint32_t* pMemoryTypeIndex)
18411 {
18412  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18413  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18414  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18415 
18416  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
18417 
18418  if(pAllocationCreateInfo->memoryTypeBits != 0)
18419  {
18420  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
18421  }
18422 
18423  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
18424  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
18425  uint32_t notPreferredFlags = 0;
18426 
18427  // Convert usage to requiredFlags and preferredFlags.
18428  switch(pAllocationCreateInfo->usage)
18429  {
18430  case VMA_MEMORY_USAGE_UNKNOWN:
18431  break;
18432  case VMA_MEMORY_USAGE_GPU_ONLY:
18433  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18434  {
18435  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18436  }
18437  break;
18438  case VMA_MEMORY_USAGE_CPU_ONLY:
18439  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
18440  break;
18441  case VMA_MEMORY_USAGE_CPU_TO_GPU:
18442  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18443  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18444  {
18445  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18446  }
18447  break;
18448  case VMA_MEMORY_USAGE_GPU_TO_CPU:
18449  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18450  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
18451  break;
18452  case VMA_MEMORY_USAGE_CPU_COPY:
18453  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18454  break;
18455  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
18456  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
18457  break;
18458  default:
18459  VMA_ASSERT(0);
18460  break;
18461  }
18462 
18463  // Avoid DEVICE_COHERENT unless explicitly requested.
18464  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
18465  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
18466  {
18467  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
18468  }
18469 
18470  *pMemoryTypeIndex = UINT32_MAX;
18471  uint32_t minCost = UINT32_MAX;
18472  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
18473  memTypeIndex < allocator->GetMemoryTypeCount();
18474  ++memTypeIndex, memTypeBit <<= 1)
18475  {
18476  // This memory type is acceptable according to memoryTypeBits bitmask.
18477  if((memTypeBit & memoryTypeBits) != 0)
18478  {
18479  const VkMemoryPropertyFlags currFlags =
18480  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
18481  // This memory type contains requiredFlags.
18482  if((requiredFlags & ~currFlags) == 0)
18483  {
18484  // Calculate cost as number of bits from preferredFlags not present in this memory type.
18485  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
18486  VmaCountBitsSet(currFlags & notPreferredFlags);
18487  // Remember memory type with lowest cost.
18488  if(currCost < minCost)
18489  {
18490  *pMemoryTypeIndex = memTypeIndex;
18491  if(currCost == 0)
18492  {
18493  return VK_SUCCESS;
18494  }
18495  minCost = currCost;
18496  }
18497  }
18498  }
18499  }
18500  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
18501 }
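/*
Typical direct use of this cost-based search: find a host-visible, preferably cached type
for readback, restricted to the types a resource reports as compatible. A sketch, assuming
memReq came from vkGetBufferMemoryRequirements or similar:

    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_GPU_TO_CPU; // HOST_VISIBLE required, HOST_CACHED preferred.

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &createInfo, &memTypeIndex);
    // VK_SUCCESS => memTypeIndex is the lowest-cost compatible type; otherwise
    // VK_ERROR_FEATURE_NOT_PRESENT, as returned above.
*/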
18502 
18503 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
18504  VmaAllocator allocator,
18505  const VkBufferCreateInfo* pBufferCreateInfo,
18506  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18507  uint32_t* pMemoryTypeIndex)
18508 {
18509  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18510  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
18511  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18512  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18513 
18514  const VkDevice hDev = allocator->m_hDevice;
18515  VkBuffer hBuffer = VK_NULL_HANDLE;
18516  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
18517  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
18518  if(res == VK_SUCCESS)
18519  {
18520  VkMemoryRequirements memReq = {};
18521  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
18522  hDev, hBuffer, &memReq);
18523 
18524  res = vmaFindMemoryTypeIndex(
18525  allocator,
18526  memReq.memoryTypeBits,
18527  pAllocationCreateInfo,
18528  pMemoryTypeIndex);
18529 
18530  allocator->GetVulkanFunctions().vkDestroyBuffer(
18531  hDev, hBuffer, allocator->GetAllocationCallbacks());
18532  }
18533  return res;
18534 }
18535 
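// Usage sketch (editorial, not part of the original source): letting the function above
// create and destroy a temporary buffer to obtain memoryTypeBits for us. Buffer size and
// usage flags are illustrative assumptions.
static VkResult ExampleFindUniformBufferMemType(VmaAllocator allocator, uint32_t* pMemTypeIndex)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    return vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufCreateInfo, &allocCreateInfo, pMemTypeIndex);
}
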
18536 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
18537  VmaAllocator allocator,
18538  const VkImageCreateInfo* pImageCreateInfo,
18539  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18540  uint32_t* pMemoryTypeIndex)
18541 {
18542  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18543  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
18544  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18545  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18546 
18547  const VkDevice hDev = allocator->m_hDevice;
18548  VkImage hImage = VK_NULL_HANDLE;
18549  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
18550  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
18551  if(res == VK_SUCCESS)
18552  {
18553  VkMemoryRequirements memReq = {};
18554  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
18555  hDev, hImage, &memReq);
18556 
18557  res = vmaFindMemoryTypeIndex(
18558  allocator,
18559  memReq.memoryTypeBits,
18560  pAllocationCreateInfo,
18561  pMemoryTypeIndex);
18562 
18563  allocator->GetVulkanFunctions().vkDestroyImage(
18564  hDev, hImage, allocator->GetAllocationCallbacks());
18565  }
18566  return res;
18567 }
18568 
18569 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
18570  VmaAllocator allocator,
18571  const VmaPoolCreateInfo* pCreateInfo,
18572  VmaPool* pPool)
18573 {
18574  VMA_ASSERT(allocator && pCreateInfo && pPool);
18575 
18576  VMA_DEBUG_LOG("vmaCreatePool");
18577 
18578  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18579 
18580  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
18581 
18582 #if VMA_RECORDING_ENABLED
18583  if(allocator->GetRecorder() != VMA_NULL)
18584  {
18585  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
18586  }
18587 #endif
18588 
18589  return res;
18590 }
18591 
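// Usage sketch (editorial, not part of the original source): creating a custom pool for a
// memoryTypeIndex found e.g. via vmaFindMemoryTypeIndexForBufferInfo. The block size and
// count limits are illustrative assumptions.
static VkResult ExampleCreateCustomPool(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block.
    poolCreateInfo.maxBlockCount = 4;               // Cap the pool at 4 blocks (64 MiB).
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}
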
18592 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
18593  VmaAllocator allocator,
18594  VmaPool pool)
18595 {
18596  VMA_ASSERT(allocator);
18597 
18598  if(pool == VK_NULL_HANDLE)
18599  {
18600  return;
18601  }
18602 
18603  VMA_DEBUG_LOG("vmaDestroyPool");
18604 
18605  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18606 
18607 #if VMA_RECORDING_ENABLED
18608  if(allocator->GetRecorder() != VMA_NULL)
18609  {
18610  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
18611  }
18612 #endif
18613 
18614  allocator->DestroyPool(pool);
18615 }
18616 
18617 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
18618  VmaAllocator allocator,
18619  VmaPool pool,
18620  VmaPoolStats* pPoolStats)
18621 {
18622  VMA_ASSERT(allocator && pool && pPoolStats);
18623 
18624  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18625 
18626  allocator->GetPoolStats(pool, pPoolStats);
18627 }
18628 
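// Usage sketch (editorial, not part of the original source): querying how much of a pool
// is currently unused, e.g. to decide whether to shrink or defragment it.
static VkDeviceSize ExampleQueryPoolUnusedBytes(VmaAllocator allocator, VmaPool pool)
{
    VmaPoolStats stats = {};
    vmaGetPoolStats(allocator, pool, &stats);
    return stats.unusedSize; // Bytes allocated from Vulkan but not used by any allocation.
}
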
18629 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
18630  VmaAllocator allocator,
18631  VmaPool pool,
18632  size_t* pLostAllocationCount)
18633 {
18634  VMA_ASSERT(allocator && pool);
18635 
18636  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18637 
18638 #if VMA_RECORDING_ENABLED
18639  if(allocator->GetRecorder() != VMA_NULL)
18640  {
18641  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
18642  }
18643 #endif
18644 
18645  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18646 }
18647 
18648 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
18649 {
18650  VMA_ASSERT(allocator && pool);
18651 
18652  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18653 
18654  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
18655 
18656  return allocator->CheckPoolCorruption(pool);
18657 }
18658 
18659 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
18660  VmaAllocator allocator,
18661  VmaPool pool,
18662  const char** ppName)
18663 {
18664  VMA_ASSERT(allocator && pool && ppName);
18665 
18666  VMA_DEBUG_LOG("vmaGetPoolName");
18667 
18668  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18669 
18670  *ppName = pool->GetName();
18671 }
18672 
18673 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
18674  VmaAllocator allocator,
18675  VmaPool pool,
18676  const char* pName)
18677 {
18678  VMA_ASSERT(allocator && pool);
18679 
18680  VMA_DEBUG_LOG("vmaSetPoolName");
18681 
18682  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18683 
18684  pool->SetName(pName);
18685 
18686 #if VMA_RECORDING_ENABLED
18687  if(allocator->GetRecorder() != VMA_NULL)
18688  {
18689  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18690  }
18691 #endif
18692 }
18693 
18694 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18695  VmaAllocator allocator,
18696  const VkMemoryRequirements* pVkMemoryRequirements,
18697  const VmaAllocationCreateInfo* pCreateInfo,
18698  VmaAllocation* pAllocation,
18699  VmaAllocationInfo* pAllocationInfo)
18700 {
18701  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18702 
18703  VMA_DEBUG_LOG("vmaAllocateMemory");
18704 
18705  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18706 
18707  VkResult result = allocator->AllocateMemory(
18708  *pVkMemoryRequirements,
18709  false, // requiresDedicatedAllocation
18710  false, // prefersDedicatedAllocation
18711  VK_NULL_HANDLE, // dedicatedBuffer
18712  UINT32_MAX, // dedicatedBufferUsage
18713  VK_NULL_HANDLE, // dedicatedImage
18714  *pCreateInfo,
18715  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18716  1, // allocationCount
18717  pAllocation);
18718 
18719 #if VMA_RECORDING_ENABLED
18720  if(allocator->GetRecorder() != VMA_NULL)
18721  {
18722  allocator->GetRecorder()->RecordAllocateMemory(
18723  allocator->GetCurrentFrameIndex(),
18724  *pVkMemoryRequirements,
18725  *pCreateInfo,
18726  *pAllocation);
18727  }
18728 #endif
18729 
18730  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18731  {
18732  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18733  }
18734 
18735  return result;
18736 }
18737 
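// Usage sketch (editorial, not part of the original source): allocating raw memory for
// requirements queried manually; binding the memory to a resource is then the caller's
// responsibility. GPU_ONLY usage is an illustrative choice.
static VkResult ExampleAllocateRaw(
    VmaAllocator allocator, const VkMemoryRequirements* pMemReq, VmaAllocation* pAlloc)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    return vmaAllocateMemory(allocator, pMemReq, &allocCreateInfo, pAlloc, VMA_NULL);
}
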
18738 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18739  VmaAllocator allocator,
18740  const VkMemoryRequirements* pVkMemoryRequirements,
18741  const VmaAllocationCreateInfo* pCreateInfo,
18742  size_t allocationCount,
18743  VmaAllocation* pAllocations,
18744  VmaAllocationInfo* pAllocationInfo)
18745 {
18746  if(allocationCount == 0)
18747  {
18748  return VK_SUCCESS;
18749  }
18750 
18751  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18752 
18753  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18754 
18755  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18756 
18757  VkResult result = allocator->AllocateMemory(
18758  *pVkMemoryRequirements,
18759  false, // requiresDedicatedAllocation
18760  false, // prefersDedicatedAllocation
18761  VK_NULL_HANDLE, // dedicatedBuffer
18762  UINT32_MAX, // dedicatedBufferUsage
18763  VK_NULL_HANDLE, // dedicatedImage
18764  *pCreateInfo,
18765  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18766  allocationCount,
18767  pAllocations);
18768 
18769 #if VMA_RECORDING_ENABLED
18770  if(allocator->GetRecorder() != VMA_NULL)
18771  {
18772  allocator->GetRecorder()->RecordAllocateMemoryPages(
18773  allocator->GetCurrentFrameIndex(),
18774  *pVkMemoryRequirements,
18775  *pCreateInfo,
18776  (uint64_t)allocationCount,
18777  pAllocations);
18778  }
18779 #endif
18780 
18781  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18782  {
18783  for(size_t i = 0; i < allocationCount; ++i)
18784  {
18785  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18786  }
18787  }
18788 
18789  return result;
18790 }
18791 
18792 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18793  VmaAllocator allocator,
18794  VkBuffer buffer,
18795  const VmaAllocationCreateInfo* pCreateInfo,
18796  VmaAllocation* pAllocation,
18797  VmaAllocationInfo* pAllocationInfo)
18798 {
18799  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18800 
18801  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18802 
18803  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18804 
18805  VkMemoryRequirements vkMemReq = {};
18806  bool requiresDedicatedAllocation = false;
18807  bool prefersDedicatedAllocation = false;
18808  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18809  requiresDedicatedAllocation,
18810  prefersDedicatedAllocation);
18811 
18812  VkResult result = allocator->AllocateMemory(
18813  vkMemReq,
18814  requiresDedicatedAllocation,
18815  prefersDedicatedAllocation,
18816  buffer, // dedicatedBuffer
18817  UINT32_MAX, // dedicatedBufferUsage
18818  VK_NULL_HANDLE, // dedicatedImage
18819  *pCreateInfo,
18820  VMA_SUBALLOCATION_TYPE_BUFFER,
18821  1, // allocationCount
18822  pAllocation);
18823 
18824 #if VMA_RECORDING_ENABLED
18825  if(allocator->GetRecorder() != VMA_NULL)
18826  {
18827  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18828  allocator->GetCurrentFrameIndex(),
18829  vkMemReq,
18830  requiresDedicatedAllocation,
18831  prefersDedicatedAllocation,
18832  *pCreateInfo,
18833  *pAllocation);
18834  }
18835 #endif
18836 
18837  if(pAllocationInfo && result == VK_SUCCESS)
18838  {
18839  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18840  }
18841 
18842  return result;
18843 }
18844 
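// Usage sketch (editorial, not part of the original source): the allocate-then-bind
// pattern for a buffer created directly with vkCreateBuffer.
static VkResult ExampleAllocAndBindBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation* pAlloc)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, pAlloc, VMA_NULL);
    if(res == VK_SUCCESS)
    {
        res = vmaBindBufferMemory(allocator, *pAlloc, buffer);
    }
    return res;
}
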
18845 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18846  VmaAllocator allocator,
18847  VkImage image,
18848  const VmaAllocationCreateInfo* pCreateInfo,
18849  VmaAllocation* pAllocation,
18850  VmaAllocationInfo* pAllocationInfo)
18851 {
18852  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18853 
18854  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18855 
18856  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18857 
18858  VkMemoryRequirements vkMemReq = {};
18859  bool requiresDedicatedAllocation = false;
18860  bool prefersDedicatedAllocation = false;
18861  allocator->GetImageMemoryRequirements(image, vkMemReq,
18862  requiresDedicatedAllocation, prefersDedicatedAllocation);
18863 
18864  VkResult result = allocator->AllocateMemory(
18865  vkMemReq,
18866  requiresDedicatedAllocation,
18867  prefersDedicatedAllocation,
18868  VK_NULL_HANDLE, // dedicatedBuffer
18869  UINT32_MAX, // dedicatedBufferUsage
18870  image, // dedicatedImage
18871  *pCreateInfo,
18872  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18873  1, // allocationCount
18874  pAllocation);
18875 
18876 #if VMA_RECORDING_ENABLED
18877  if(allocator->GetRecorder() != VMA_NULL)
18878  {
18879  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18880  allocator->GetCurrentFrameIndex(),
18881  vkMemReq,
18882  requiresDedicatedAllocation,
18883  prefersDedicatedAllocation,
18884  *pCreateInfo,
18885  *pAllocation);
18886  }
18887 #endif
18888 
18889  if(pAllocationInfo && result == VK_SUCCESS)
18890  {
18891  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18892  }
18893 
18894  return result;
18895 }
18896 
18897 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18898  VmaAllocator allocator,
18899  VmaAllocation allocation)
18900 {
18901  VMA_ASSERT(allocator);
18902 
18903  if(allocation == VK_NULL_HANDLE)
18904  {
18905  return;
18906  }
18907 
18908  VMA_DEBUG_LOG("vmaFreeMemory");
18909 
18910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18911 
18912 #if VMA_RECORDING_ENABLED
18913  if(allocator->GetRecorder() != VMA_NULL)
18914  {
18915  allocator->GetRecorder()->RecordFreeMemory(
18916  allocator->GetCurrentFrameIndex(),
18917  allocation);
18918  }
18919 #endif
18920 
18921  allocator->FreeMemory(
18922  1, // allocationCount
18923  &allocation);
18924 }
18925 
18926 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18927  VmaAllocator allocator,
18928  size_t allocationCount,
18929  const VmaAllocation* pAllocations)
18930 {
18931  if(allocationCount == 0)
18932  {
18933  return;
18934  }
18935 
18936  VMA_ASSERT(allocator);
18937 
18938  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18939 
18940  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18941 
18942 #if VMA_RECORDING_ENABLED
18943  if(allocator->GetRecorder() != VMA_NULL)
18944  {
18945  allocator->GetRecorder()->RecordFreeMemoryPages(
18946  allocator->GetCurrentFrameIndex(),
18947  (uint64_t)allocationCount,
18948  pAllocations);
18949  }
18950 #endif
18951 
18952  allocator->FreeMemory(allocationCount, pAllocations);
18953 }
18954 
18955 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18956  VmaAllocator allocator,
18957  VmaAllocation allocation,
18958  VmaAllocationInfo* pAllocationInfo)
18959 {
18960  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18961 
18962  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18963 
18964 #if VMA_RECORDING_ENABLED
18965  if(allocator->GetRecorder() != VMA_NULL)
18966  {
18967  allocator->GetRecorder()->RecordGetAllocationInfo(
18968  allocator->GetCurrentFrameIndex(),
18969  allocation);
18970  }
18971 #endif
18972 
18973  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18974 }
18975 
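// Usage sketch (editorial, not part of the original source): reading back where an
// allocation landed, e.g. for manual descriptor updates or debugging.
static void ExampleInspectAllocation(VmaAllocator allocator, VmaAllocation alloc)
{
    VmaAllocationInfo info = {};
    vmaGetAllocationInfo(allocator, alloc, &info);
    // info.deviceMemory, info.offset and info.size describe the backing memory range;
    // info.pMappedData is non-null for persistently mapped allocations.
    (void)info;
}
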
18976 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18977  VmaAllocator allocator,
18978  VmaAllocation allocation)
18979 {
18980  VMA_ASSERT(allocator && allocation);
18981 
18982  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18983 
18984 #if VMA_RECORDING_ENABLED
18985  if(allocator->GetRecorder() != VMA_NULL)
18986  {
18987  allocator->GetRecorder()->RecordTouchAllocation(
18988  allocator->GetCurrentFrameIndex(),
18989  allocation);
18990  }
18991 #endif
18992 
18993  return allocator->TouchAllocation(allocation);
18994 }
18995 
18996 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
18997  VmaAllocator allocator,
18998  VmaAllocation allocation,
18999  void* pUserData)
19000 {
19001  VMA_ASSERT(allocator && allocation);
19002 
19003  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19004 
19005  allocation->SetUserData(allocator, pUserData);
19006 
19007 #if VMA_RECORDING_ENABLED
19008  if(allocator->GetRecorder() != VMA_NULL)
19009  {
19010  allocator->GetRecorder()->RecordSetAllocationUserData(
19011  allocator->GetCurrentFrameIndex(),
19012  allocation,
19013  pUserData);
19014  }
19015 #endif
19016 }
19017 
19018 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
19019  VmaAllocator allocator,
19020  VmaAllocation* pAllocation)
19021 {
19022  VMA_ASSERT(allocator && pAllocation);
19023 
19024  VMA_DEBUG_GLOBAL_MUTEX_LOCK;
19025 
19026  allocator->CreateLostAllocation(pAllocation);
19027 
19028 #if VMA_RECORDING_ENABLED
19029  if(allocator->GetRecorder() != VMA_NULL)
19030  {
19031  allocator->GetRecorder()->RecordCreateLostAllocation(
19032  allocator->GetCurrentFrameIndex(),
19033  *pAllocation);
19034  }
19035 #endif
19036 }
19037 
19038 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
19039  VmaAllocator allocator,
19040  VmaAllocation allocation,
19041  void** ppData)
19042 {
19043  VMA_ASSERT(allocator && allocation && ppData);
19044 
19045  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19046 
19047  VkResult res = allocator->Map(allocation, ppData);
19048 
19049 #if VMA_RECORDING_ENABLED
19050  if(allocator->GetRecorder() != VMA_NULL)
19051  {
19052  allocator->GetRecorder()->RecordMapMemory(
19053  allocator->GetCurrentFrameIndex(),
19054  allocation);
19055  }
19056 #endif
19057 
19058  return res;
19059 }
19060 
19061 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
19062  VmaAllocator allocator,
19063  VmaAllocation allocation)
19064 {
19065  VMA_ASSERT(allocator && allocation);
19066 
19067  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19068 
19069 #if VMA_RECORDING_ENABLED
19070  if(allocator->GetRecorder() != VMA_NULL)
19071  {
19072  allocator->GetRecorder()->RecordUnmapMemory(
19073  allocator->GetCurrentFrameIndex(),
19074  allocation);
19075  }
19076 #endif
19077 
19078  allocator->Unmap(allocation);
19079 }
19080 
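// Usage sketch (editorial, not part of the original source): the canonical map-copy-unmap
// upload for a host-visible allocation. Pair with vmaFlushAllocation below when the
// chosen memory type lacks HOST_COHERENT. Assumes memcpy is available, as used elsewhere
// in this implementation section.
static VkResult ExampleUpload(VmaAllocator allocator, VmaAllocation alloc, const void* pSrc, size_t size)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, pSrc, size);
        vmaUnmapMemory(allocator, alloc);
    }
    return res;
}
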
19081 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
19082 {
19083  VMA_ASSERT(allocator && allocation);
19084 
19085  VMA_DEBUG_LOG("vmaFlushAllocation");
19086 
19087  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19088 
19089  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
19090 
19091 #if VMA_RECORDING_ENABLED
19092  if(allocator->GetRecorder() != VMA_NULL)
19093  {
19094  allocator->GetRecorder()->RecordFlushAllocation(
19095  allocator->GetCurrentFrameIndex(),
19096  allocation, offset, size);
19097  }
19098 #endif
19099 
19100  return res;
19101 }
19102 
19103 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
19104 {
19105  VMA_ASSERT(allocator && allocation);
19106 
19107  VMA_DEBUG_LOG("vmaInvalidateAllocation");
19108 
19109  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19110 
19111  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
19112 
19113 #if VMA_RECORDING_ENABLED
19114  if(allocator->GetRecorder() != VMA_NULL)
19115  {
19116  allocator->GetRecorder()->RecordInvalidateAllocation(
19117  allocator->GetCurrentFrameIndex(),
19118  allocation, offset, size);
19119  }
19120 #endif
19121 
19122  return res;
19123 }
19124 
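// Usage sketch (editorial, not part of the original source): flush after a CPU write,
// invalidate before a CPU read; both become no-ops on HOST_COHERENT memory types.
static VkResult ExampleFlushWholeAllocation(VmaAllocator allocator, VmaAllocation alloc)
{
    // Offset 0 with VK_WHOLE_SIZE covers the entire allocation.
    return vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
}
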
19125 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
19126  VmaAllocator allocator,
19127  uint32_t allocationCount,
19128  const VmaAllocation* allocations,
19129  const VkDeviceSize* offsets,
19130  const VkDeviceSize* sizes)
19131 {
19132  VMA_ASSERT(allocator);
19133 
19134  if(allocationCount == 0)
19135  {
19136  return VK_SUCCESS;
19137  }
19138 
19139  VMA_ASSERT(allocations);
19140 
19141  VMA_DEBUG_LOG("vmaFlushAllocations");
19142 
19143  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19144 
19145  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
19146 
19147 #if VMA_RECORDING_ENABLED
19148  if(allocator->GetRecorder() != VMA_NULL)
19149  {
19150  //TODO
19151  }
19152 #endif
19153 
19154  return res;
19155 }
19156 
19157 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
19158  VmaAllocator allocator,
19159  uint32_t allocationCount,
19160  const VmaAllocation* allocations,
19161  const VkDeviceSize* offsets,
19162  const VkDeviceSize* sizes)
19163 {
19164  VMA_ASSERT(allocator);
19165 
19166  if(allocationCount == 0)
19167  {
19168  return VK_SUCCESS;
19169  }
19170 
19171  VMA_ASSERT(allocations);
19172 
19173  VMA_DEBUG_LOG("vmaInvalidateAllocations");
19174 
19175  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19176 
19177  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
19178 
19179 #if VMA_RECORDING_ENABLED
19180  if(allocator->GetRecorder() != VMA_NULL)
19181  {
19182  //TODO
19183  }
19184 #endif
19185 
19186  return res;
19187 }
19188 
19189 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
19190 {
19191  VMA_ASSERT(allocator);
19192 
19193  VMA_DEBUG_LOG("vmaCheckCorruption");
19194 
19195  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19196 
19197  return allocator->CheckCorruption(memoryTypeBits);
19198 }
19199 
19200 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
19201  VmaAllocator allocator,
19202  const VmaAllocation* pAllocations,
19203  size_t allocationCount,
19204  VkBool32* pAllocationsChanged,
19205  const VmaDefragmentationInfo *pDefragmentationInfo,
19206  VmaDefragmentationStats* pDefragmentationStats)
19207 {
19208  // Deprecated interface, reimplemented using new one.
19209 
19210  VmaDefragmentationInfo2 info2 = {};
19211  info2.allocationCount = (uint32_t)allocationCount;
19212  info2.pAllocations = pAllocations;
19213  info2.pAllocationsChanged = pAllocationsChanged;
19214  if(pDefragmentationInfo != VMA_NULL)
19215  {
19216  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
19217  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
19218  }
19219  else
19220  {
19221  info2.maxCpuAllocationsToMove = UINT32_MAX;
19222  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
19223  }
19224  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
19225 
19226  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
19227  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
19228  if(res == VK_NOT_READY)
19229  {
19230  res = vmaDefragmentationEnd(allocator, ctx);
19231  }
19232  return res;
19233 }
19234 
19235 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
19236  VmaAllocator allocator,
19237  const VmaDefragmentationInfo2* pInfo,
19238  VmaDefragmentationStats* pStats,
19239  VmaDefragmentationContext *pContext)
19240 {
19241  VMA_ASSERT(allocator && pInfo && pContext);
19242 
19243  // Degenerate case: Nothing to defragment.
19244  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
19245  {
19246  return VK_SUCCESS;
19247  }
19248 
19249  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
19250  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
19251  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
19252  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
19253 
19254  VMA_DEBUG_LOG("vmaDefragmentationBegin");
19255 
19256  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19257 
19258  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
19259 
19260 #if VMA_RECORDING_ENABLED
19261  if(allocator->GetRecorder() != VMA_NULL)
19262  {
19263  allocator->GetRecorder()->RecordDefragmentationBegin(
19264  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
19265  }
19266 #endif
19267 
19268  return res;
19269 }
19270 
19271 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
19272  VmaAllocator allocator,
19273  VmaDefragmentationContext context)
19274 {
19275  VMA_ASSERT(allocator);
19276 
19277  VMA_DEBUG_LOG("vmaDefragmentationEnd");
19278 
19279  if(context != VK_NULL_HANDLE)
19280  {
19281  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19282 
19283 #if VMA_RECORDING_ENABLED
19284  if(allocator->GetRecorder() != VMA_NULL)
19285  {
19286  allocator->GetRecorder()->RecordDefragmentationEnd(
19287  allocator->GetCurrentFrameIndex(), context);
19288  }
19289 #endif
19290 
19291  return allocator->DefragmentationEnd(context);
19292  }
19293  else
19294  {
19295  return VK_SUCCESS;
19296  }
19297 }
19298 
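// Usage sketch (editorial, not part of the original source): CPU-side defragmentation
// using the begin/end pair above, mirroring the deprecated vmaDefragment wrapper. After
// it completes, resources bound to moved allocations must be re-created and re-bound by
// the caller.
static VkResult ExampleDefragmentCpu(
    VmaAllocator allocator, const VmaAllocation* pAllocs, uint32_t count,
    VmaDefragmentationStats* pStats)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = count;
    info.pAllocations = pAllocs;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;
    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, pStats, &ctx);
    if(res == VK_NOT_READY)
    {
        res = vmaDefragmentationEnd(allocator, ctx); // Completes the process started above.
    }
    return res;
}
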
19299 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
19300  VmaAllocator allocator,
19301  VmaDefragmentationContext context,
19302  VmaDefragmentationPassInfo* pInfo
19303  )
19304 {
19305  VMA_ASSERT(allocator);
19306  VMA_ASSERT(pInfo);
19307 
19308  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
19309 
19310  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19311 
19312  if(context == VK_NULL_HANDLE)
19313  {
19314  pInfo->moveCount = 0;
19315  return VK_SUCCESS;
19316  }
19317 
19318  return allocator->DefragmentationPassBegin(pInfo, context);
19319 }
19320 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
19321  VmaAllocator allocator,
19322  VmaDefragmentationContext context)
19323 {
19324  VMA_ASSERT(allocator);
19325 
19326  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
19327  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19328 
19329  if(context == VK_NULL_HANDLE)
19330  return VK_SUCCESS;
19331 
19332  return allocator->DefragmentationPassEnd(context);
19333 }
19334 
19335 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
19336  VmaAllocator allocator,
19337  VmaAllocation allocation,
19338  VkBuffer buffer)
19339 {
19340  VMA_ASSERT(allocator && allocation && buffer);
19341 
19342  VMA_DEBUG_LOG("vmaBindBufferMemory");
19343 
19344  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19345 
19346  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
19347 }
19348 
19349 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
19350  VmaAllocator allocator,
19351  VmaAllocation allocation,
19352  VkDeviceSize allocationLocalOffset,
19353  VkBuffer buffer,
19354  const void* pNext)
19355 {
19356  VMA_ASSERT(allocator && allocation && buffer);
19357 
19358  VMA_DEBUG_LOG("vmaBindBufferMemory2");
19359 
19360  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19361 
19362  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
19363 }
19364 
19365 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
19366  VmaAllocator allocator,
19367  VmaAllocation allocation,
19368  VkImage image)
19369 {
19370  VMA_ASSERT(allocator && allocation && image);
19371 
19372  VMA_DEBUG_LOG("vmaBindImageMemory");
19373 
19374  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19375 
19376  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
19377 }
19378 
19379 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
19380  VmaAllocator allocator,
19381  VmaAllocation allocation,
19382  VkDeviceSize allocationLocalOffset,
19383  VkImage image,
19384  const void* pNext)
19385 {
19386  VMA_ASSERT(allocator && allocation && image);
19387 
19388  VMA_DEBUG_LOG("vmaBindImageMemory2");
19389 
19390  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19391 
19392  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
19393 }
19394 
19395 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
19396  VmaAllocator allocator,
19397  const VkBufferCreateInfo* pBufferCreateInfo,
19398  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19399  VkBuffer* pBuffer,
19400  VmaAllocation* pAllocation,
19401  VmaAllocationInfo* pAllocationInfo)
19402 {
19403  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
19404 
19405  if(pBufferCreateInfo->size == 0)
19406  {
19407  return VK_ERROR_VALIDATION_FAILED_EXT;
19408  }
19409  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
19410  !allocator->m_UseKhrBufferDeviceAddress)
19411  {
19412  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
19413  return VK_ERROR_VALIDATION_FAILED_EXT;
19414  }
19415 
19416  VMA_DEBUG_LOG("vmaCreateBuffer");
19417 
19418  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19419 
19420  *pBuffer = VK_NULL_HANDLE;
19421  *pAllocation = VK_NULL_HANDLE;
19422 
19423  // 1. Create VkBuffer.
19424  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
19425  allocator->m_hDevice,
19426  pBufferCreateInfo,
19427  allocator->GetAllocationCallbacks(),
19428  pBuffer);
19429  if(res >= 0)
19430  {
19431  // 2. vkGetBufferMemoryRequirements.
19432  VkMemoryRequirements vkMemReq = {};
19433  bool requiresDedicatedAllocation = false;
19434  bool prefersDedicatedAllocation = false;
19435  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
19436  requiresDedicatedAllocation, prefersDedicatedAllocation);
19437 
19438  // 3. Allocate memory using allocator.
19439  res = allocator->AllocateMemory(
19440  vkMemReq,
19441  requiresDedicatedAllocation,
19442  prefersDedicatedAllocation,
19443  *pBuffer, // dedicatedBuffer
19444  pBufferCreateInfo->usage, // dedicatedBufferUsage
19445  VK_NULL_HANDLE, // dedicatedImage
19446  *pAllocationCreateInfo,
19447  VMA_SUBALLOCATION_TYPE_BUFFER,
19448  1, // allocationCount
19449  pAllocation);
19450 
19451 #if VMA_RECORDING_ENABLED
19452  if(allocator->GetRecorder() != VMA_NULL)
19453  {
19454  allocator->GetRecorder()->RecordCreateBuffer(
19455  allocator->GetCurrentFrameIndex(),
19456  *pBufferCreateInfo,
19457  *pAllocationCreateInfo,
19458  *pAllocation);
19459  }
19460 #endif
19461 
19462  if(res >= 0)
19463  {
19464  // 4. Bind buffer with memory.
19465  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19466  {
19467  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
19468  }
19469  if(res >= 0)
19470  {
19471  // All steps succeeded.
19472  #if VMA_STATS_STRING_ENABLED
19473  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19474  #endif
19475  if(pAllocationInfo != VMA_NULL)
19476  {
19477  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19478  }
19479 
19480  return VK_SUCCESS;
19481  }
19482  allocator->FreeMemory(
19483  1, // allocationCount
19484  pAllocation);
19485  *pAllocation = VK_NULL_HANDLE;
19486  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19487  *pBuffer = VK_NULL_HANDLE;
19488  return res;
19489  }
19490  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19491  *pBuffer = VK_NULL_HANDLE;
19492  return res;
19493  }
19494  return res;
19495 }
19496 
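// Usage sketch (editorial, not part of the original source): the typical one-call entry
// point, covering the create/allocate/bind steps above. Size and usage flags are
// illustrative assumptions.
static VkResult ExampleCreateVertexBuffer(
    VmaAllocator allocator, VkDeviceSize size, VkBuffer* pBuffer, VmaAllocation* pAlloc)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = size;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, pBuffer, pAlloc, VMA_NULL);
}
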
19497 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
19498  VmaAllocator allocator,
19499  VkBuffer buffer,
19500  VmaAllocation allocation)
19501 {
19502  VMA_ASSERT(allocator);
19503 
19504  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19505  {
19506  return;
19507  }
19508 
19509  VMA_DEBUG_LOG("vmaDestroyBuffer");
19510 
19511  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19512 
19513 #if VMA_RECORDING_ENABLED
19514  if(allocator->GetRecorder() != VMA_NULL)
19515  {
19516  allocator->GetRecorder()->RecordDestroyBuffer(
19517  allocator->GetCurrentFrameIndex(),
19518  allocation);
19519  }
19520 #endif
19521 
19522  if(buffer != VK_NULL_HANDLE)
19523  {
19524  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
19525  }
19526 
19527  if(allocation != VK_NULL_HANDLE)
19528  {
19529  allocator->FreeMemory(
19530  1, // allocationCount
19531  &allocation);
19532  }
19533 }
19534 
19535 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
19536  VmaAllocator allocator,
19537  const VkImageCreateInfo* pImageCreateInfo,
19538  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19539  VkImage* pImage,
19540  VmaAllocation* pAllocation,
19541  VmaAllocationInfo* pAllocationInfo)
19542 {
19543  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
19544 
19545  if(pImageCreateInfo->extent.width == 0 ||
19546  pImageCreateInfo->extent.height == 0 ||
19547  pImageCreateInfo->extent.depth == 0 ||
19548  pImageCreateInfo->mipLevels == 0 ||
19549  pImageCreateInfo->arrayLayers == 0)
19550  {
19551  return VK_ERROR_VALIDATION_FAILED_EXT;
19552  }
19553 
19554  VMA_DEBUG_LOG("vmaCreateImage");
19555 
19556  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19557 
19558  *pImage = VK_NULL_HANDLE;
19559  *pAllocation = VK_NULL_HANDLE;
19560 
19561  // 1. Create VkImage.
19562  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
19563  allocator->m_hDevice,
19564  pImageCreateInfo,
19565  allocator->GetAllocationCallbacks(),
19566  pImage);
19567  if(res >= 0)
19568  {
19569  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
19570  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
19571  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
19572 
19573  // 2. Allocate memory using allocator.
19574  VkMemoryRequirements vkMemReq = {};
19575  bool requiresDedicatedAllocation = false;
19576  bool prefersDedicatedAllocation = false;
19577  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
19578  requiresDedicatedAllocation, prefersDedicatedAllocation);
19579 
19580  res = allocator->AllocateMemory(
19581  vkMemReq,
19582  requiresDedicatedAllocation,
19583  prefersDedicatedAllocation,
19584  VK_NULL_HANDLE, // dedicatedBuffer
19585  UINT32_MAX, // dedicatedBufferUsage
19586  *pImage, // dedicatedImage
19587  *pAllocationCreateInfo,
19588  suballocType,
19589  1, // allocationCount
19590  pAllocation);
19591 
19592 #if VMA_RECORDING_ENABLED
19593  if(allocator->GetRecorder() != VMA_NULL)
19594  {
19595  allocator->GetRecorder()->RecordCreateImage(
19596  allocator->GetCurrentFrameIndex(),
19597  *pImageCreateInfo,
19598  *pAllocationCreateInfo,
19599  *pAllocation);
19600  }
19601 #endif
19602 
19603  if(res >= 0)
19604  {
19605  // 3. Bind image with memory.
19606  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19607  {
19608  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
19609  }
19610  if(res >= 0)
19611  {
19612  // All steps succeeded.
19613  #if VMA_STATS_STRING_ENABLED
19614  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
19615  #endif
19616  if(pAllocationInfo != VMA_NULL)
19617  {
19618  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19619  }
19620 
19621  return VK_SUCCESS;
19622  }
19623  allocator->FreeMemory(
19624  1, // allocationCount
19625  pAllocation);
19626  *pAllocation = VK_NULL_HANDLE;
19627  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19628  *pImage = VK_NULL_HANDLE;
19629  return res;
19630  }
19631  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19632  *pImage = VK_NULL_HANDLE;
19633  return res;
19634  }
19635  return res;
19636 }
19637 
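// Usage sketch (editorial, not part of the original source): creating a sampled 2D
// texture the same way; format and usage flags are illustrative assumptions.
static VkResult ExampleCreateTexture2D(
    VmaAllocator allocator, uint32_t width, uint32_t height, VkImage* pImage, VmaAllocation* pAlloc)
{
    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent = { width, height, 1u };
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; // Selects VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL above.
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    return vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, pImage, pAlloc, VMA_NULL);
}
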
19638 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
19639  VmaAllocator allocator,
19640  VkImage image,
19641  VmaAllocation allocation)
19642 {
19643  VMA_ASSERT(allocator);
19644 
19645  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19646  {
19647  return;
19648  }
19649 
19650  VMA_DEBUG_LOG("vmaDestroyImage");
19651 
19652  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19653 
19654 #if VMA_RECORDING_ENABLED
19655  if(allocator->GetRecorder() != VMA_NULL)
19656  {
19657  allocator->GetRecorder()->RecordDestroyImage(
19658  allocator->GetCurrentFrameIndex(),
19659  allocation);
19660  }
19661 #endif
19662 
19663  if(image != VK_NULL_HANDLE)
19664  {
19665  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
19666  }
19667  if(allocation != VK_NULL_HANDLE)
19668  {
19669  allocator->FreeMemory(
19670  1, // allocationCount
19671  &allocation);
19672  }
19673 }
19674 
19675 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:2900
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2926
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2932
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2918
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2939
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2913
float priority
A floating-point value between 0 and 1, indicating the priority of the allocation relative to other m...
Definition: vk_mem_alloc.h:2946
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2908
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2902
Represents single memory allocation.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:3267
VkDeviceSize offset
Offset in VkDeviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:3291
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:3311
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:3272
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:3302
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:3316
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:3281
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:2422
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2427
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2453
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2478
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2424
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null.
Definition: vk_mem_alloc.h:2484
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2436
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2496
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2433
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2491
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2430
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2505
const VkExternalMemoryHandleTypeFlagsKHR * pTypeExternalMemoryHandleTypes
Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
Definition: vk_mem_alloc.h:2516
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2439
Represents main object of this library initialized.
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:2532
VkDevice device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:2547
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2537
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2542
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2638
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2641
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2652
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2662
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2673
Represents Opaque object that represents started defragmentation process.
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3666
const VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3706
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3672
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3726
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3721
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3669
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3687
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3690
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3735
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3716
const VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3681
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3711
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3757
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3767
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3762
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3748
uint32_t moveCount
Definition: vk_mem_alloc.h:3749
VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3750
Definition: vk_mem_alloc.h:3738
VkDeviceMemory memory
Definition: vk_mem_alloc.h:3740
VkDeviceSize offset
Definition: vk_mem_alloc.h:3741
VmaAllocation allocation
Definition: vk_mem_alloc.h:3739
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3771
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3779
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3773
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3775
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3777
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:2231
void * pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:2237
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:2233
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:2235
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:3068
float priority
A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relat...
Definition: vk_mem_alloc.h:3116
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:3071
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:3074
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:3110
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:3083
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:3088
VkDeviceSize minAllocationAlignment
Additional minimum alignment to be used for all allocations created from this pool....
Definition: vk_mem_alloc.h:3123
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:3096
void * pMemoryAllocateNext
Additional pNext chain to be attached to VkMemoryAllocateInfo used for every allocation made by this ...
Definition: vk_mem_alloc.h:3133
Represents custom memory pool.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:3138
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:3141
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:3160
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:3157
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:3147
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:3144
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:3150
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:2407
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2417
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2409
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2599
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2610
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2610
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2609
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2611
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2603
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2611
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2607
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2601
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2610
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2605
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2611
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2616
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2618
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2617
VmaStatInfo total
Definition: vk_mem_alloc.h:2619
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2361
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2371
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2376
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:2364
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2368
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2373
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2365
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2372
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2369
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2363
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:2362
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2375
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2377
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2370
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:2366
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:2367
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:2378
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2374
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:2217
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:2029
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
struct VmaStats VmaStats
General statistics from current state of Allocator.
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:3064
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:2393
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2401
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:2399
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:2241
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2316
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:2246
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:2298
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Definition: vk_mem_alloc.h:2334
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:2286
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:2271
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2353
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
Definition: vk_mem_alloc.h:2351
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2897
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
void vmaFreeMemory(VmaAllocator allocator, const VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3656
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3657
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3658
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:2210
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, const VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3660
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:3008
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3043
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3062
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3054
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:3026
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:3058
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkResult vmaDefragment(VmaAllocator allocator, const VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
VmaMemoryUsage
Definition: vk_mem_alloc.h:2721
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2784
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2752
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2774
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2768
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2782
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2759
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2742
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2725
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VkResult vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
VkResult vmaInvalidateAllocations(VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation *allocations, const VkDeviceSize *offsets, const VkDeviceSize *sizes)
Invalidates memory of given set of allocations.
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
VkResult vmaFlushAllocations(VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation *allocations, const VkDeviceSize *offsets, const VkDeviceSize *sizes)
Flushes memory of given set of allocations.
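A sketch of flushing a batch of host-written allocations in one call, required when their memory type lacks HOST_COHERENT; allocs and allocCount are assumed:

// Null offsets/sizes mean offset 0 and VK_WHOLE_SIZE for every allocation.
VkResult res = vmaFlushAllocations(allocator, allocCount, allocs, nullptr, nullptr);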
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
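The canonical buffer-creation sketch for this function, close to the example in the library's own documentation:

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
// ... use the buffer, then destroy buffer and allocation together:
vmaDestroyBuffer(allocator, buffer, allocation);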
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2355
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
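A sketch of allocating several allocations at once and freeing them together with vmaFreeMemoryPages(); memReq is an assumed VkMemoryRequirements shared by all pages:

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation pages[8];
// The final parameter may be null, or an array of 8 VmaAllocationInfo to be filled.
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &allocCreateInfo, 8, pages, nullptr);
// ... bind each page to a resource and use it ...
vmaFreeMemoryPages(allocator, 8, pages);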
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
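A map/write/unmap sketch for a HOST_VISIBLE allocation; srcData and srcSize are assumed, and the trailing flush is a no-op on HOST_COHERENT memory:

void* mapped = nullptr;
if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, srcSize); // <cstring> assumed
    vmaUnmapMemory(allocator, alloc);
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
}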
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2788
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2883
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped and retrieve a pointer to it (see the mapped-allocation sketch after this list).
Definition: vk_mem_alloc.h:2819
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2856
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2876
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2795
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2850
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2832
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2886
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2839
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2865
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new blocks.
Definition: vk_mem_alloc.h:2806
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2880
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2890
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2845
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2860
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2869
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2895
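A persistently mapped buffer sketch using VMA_ALLOCATION_CREATE_MAPPED_BIT, as referenced above; the pointer in VmaAllocationInfo::pMappedData stays valid for the allocation's lifetime:

VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = sizeof(float) * 1024;
bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
float* data = (float*)allocInfo.pMappedData; // no vmaMapMemory() call needed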
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
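A sketch of choosing a memory type for a buffer description without actually creating the buffer, e.g. to fill VmaPoolCreateInfo::memoryTypeIndex:

VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = 1024;
bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufInfo, &allocCreateInfo, &memTypeIndex);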
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
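A budget-query sketch; the function fills one VmaBudget per memory heap, so the destination array must cover every heap of the physical device:

VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
vmaGetBudget(allocator, budgets);
// budgets[heapIndex].usage is the estimated current usage of that heap;
// budgets[heapIndex].budget is how much the process may use without degradation.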
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2403
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
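A sketch of attaching a debug name through pUserData; if the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, the string is copied and owned by the library:

vmaSetAllocationUserData(allocator, alloc, (void*)"MyTexture.DiffuseMap");

VmaAllocationInfo info;
vmaGetAllocationInfo(allocator, alloc, &info);
const char* name = (const char*)info.pUserData; // "MyTexture.DiffuseMap"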
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about existing VmaAllocator object - handle to Vulkan device etc.