Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
    #define NOMINMAX // For windows.h
#endif

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

// Define this macro to declare the maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use a version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
#if !defined(VMA_MEMORY_PRIORITY)
    #if VK_EXT_memory_priority
        #define VMA_MEMORY_PRIORITY 1
    #else
        #define VMA_MEMORY_PRIORITY 0
    #endif
#endif

// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
#if !defined(VMA_EXTERNAL_MEMORY)
    #if VK_KHR_external_memory
        #define VMA_EXTERNAL_MEMORY 1
    #else
        #define VMA_EXTERNAL_MEMORY 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif
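
/*
Example (an illustrative sketch, not part of the original header): when building
VMA into its own Windows DLL, both macros can be defined before the include. The
decoration values below are taken from the comment above, not mandated.

    // Hypothetical vma_dll.cpp: compiles VMA into a DLL exporting all vma* functions.
    #define VMA_CALL_PRE __declspec(dllexport)
    #define VMA_CALL_POST __cdecl
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/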

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to if they are not null.
//
// The length may be one of:
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example,
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
//   means the number of memory heaps available in the device associated
//   with the VmaAllocator in question.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers, we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);

typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);
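
/*
Example (an illustrative sketch, not part of the original header): a pair of
logging hooks matching these callback types, registerable through
VmaDeviceMemoryCallbacks below. The function names are hypothetical and
<cstdio> is assumed to be available.

    static void VKAPI_PTR MyAllocateCallback(
        VmaAllocator allocator, uint32_t memoryType,
        VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
    {
        printf("Allocated %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }

    static void VKAPI_PTR MyFreeCallback(
        VmaAllocator allocator, uint32_t memoryType,
        VkDeviceMemory memory, VkDeviceSize size, void* pUserData)
    {
        printf("Freed %llu bytes from memory type %u\n",
            (unsigned long long)size, memoryType);
    }
*/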

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;

typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;
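
/*
Example (an illustrative sketch, not part of the original header): when Vulkan
prototypes are linked statically, the struct can be filled directly from the
global entry points. Only a few members are shown; the rest follow the same pattern.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = &vkFreeMemory;
    // ... and so on for the remaining members.
*/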

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* VMA_NOT_NULL pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice VMA_NOT_NULL physicalDevice;

    VkDevice VMA_NOT_NULL device;

    VkDeviceSize preferredLargeHeapBlockSize;

    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;

    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;

    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;

    const VmaRecordSettings* VMA_NULLABLE pRecordSettings;

    VkInstance VMA_NOT_NULL instance;

    uint32_t vulkanApiVersion;
#if VMA_EXTERNAL_MEMORY
    const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
#endif // #if VMA_EXTERNAL_MEMORY
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator VMA_NULLABLE allocator);
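
/*
Example (an illustrative sketch, not part of the original header): creating and
destroying an allocator, assuming instance, physicalDevice, and device were
already created by the application.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2; // must be <= VMA_VULKAN_VERSION
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/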

typedef struct VmaAllocatorInfo
{
    VkInstance VMA_NOT_NULL instance;
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaStats* VMA_NOT_NULL pStats);

typedef struct VmaBudget
{
    VkDeviceSize blockBytes;

    VkDeviceSize allocationBytes;

    VkDeviceSize usage;

    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaBudget* VMA_NOT_NULL pBudget);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED
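
/*
Example (an illustrative sketch, not part of the original header): dumping the
JSON statistics; the returned string must be released with vmaFreeStatsString.

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/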

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool VMA_NULLABLE pool;
    void* VMA_NULLABLE pUserData;
    float priority;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
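
/*
Example (an illustrative sketch, not part of the original header): picking a
memory type for a device-local vertex buffer.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/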

typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
    float priority;
    VkDeviceSize minAllocationAlignment;
    void* VMA_NULLABLE pMemoryAllocateNext;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NULLABLE pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    VmaPoolStats* VMA_NOT_NULL pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    size_t* VMA_NULLABLE pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE * VMA_NOT_NULL ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE pName);
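
/*
Example (an illustrative sketch, not part of the original header): a custom pool
for a memory type index found via the vmaFindMemoryTypeIndex* functions; the
block size and name are illustrative values, and memoryTypeIndex/maxBlockCount
are members reconstructed above.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB blocks
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    vmaSetPoolName(allocator, pool, "TexturePool");
    // ... allocate with VmaAllocationCreateInfo::pool = pool ...
    vmaDestroyPool(allocator, pool);
*/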

VK_DEFINE_HANDLE(VmaAllocation)


typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* VMA_NULLABLE pMappedData;
    void* VMA_NULLABLE pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
    size_t allocationCount,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    size_t allocationCount,
    const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE * VMA_NOT_NULL ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);
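
/*
Example (an illustrative sketch, not part of the original header): writing to a
host-visible allocation; srcData/srcDataSize are the caller's staging data, and
the flush is a no-op on HOST_COHERENT memory but always safe to call.

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, allocation);
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    }
*/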

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
    uint32_t poolCount;
    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer VMA_NULLABLE commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation VMA_NOT_NULL allocation;
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
} VmaDefragmentationPassInfo;

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
    VmaDefragmentationStats* VMA_NULLABLE pStats,
    VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context,
    VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context
);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    size_t allocationCount,
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
    const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
    VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);
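
/*
Example (an illustrative sketch, not part of the original header): CPU-side
defragmentation of an application-owned array of allocations; a GPU pass would
additionally supply VmaDefragmentationInfo2::commandBuffer. allocations and
allocationCount are assumed to be provided by the caller.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
*/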

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
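
/*
Example (an illustrative sketch, not part of the original header): creating a
device-local vertex buffer together with its memory in one call.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/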

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkDeviceSize minAlignment,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
    VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
    VmaAllocation VMA_NULLABLE allocation);
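
/*
Example (an illustrative sketch, not part of the original header): creating a
sampled 2D texture with device-local memory.

    VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imageInfo.imageType = VK_IMAGE_TYPE_2D;
    imageInfo.extent = { 1024, 1024, 1 };
    imageInfo.mipLevels = 1;
    imageInfo.arrayLayers = 1;
    imageInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imageInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image;
    VmaAllocation allocation;
    VkResult res = vmaCreateImage(allocator, &imageInfo, &allocInfo, &image, &allocation, nullptr);
    // ...
    vmaDestroyImage(allocator, image, allocation);
*/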

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <utility>

#if VMA_RECORDING_ENABLED
    #include <chrono>
    #if defined(_WIN32)
        #include <windows.h>
    #else
        #include <sstream>
        #include <thread>
    #endif
#endif
4074 
4075 /*******************************************************************************
4076 CONFIGURATION SECTION
4077 
4078 Define some of these macros before each #include of this header or change them
4079 here if you need other then default behavior depending on your environment.
4080 */
4081 
4082 /*
4083 Define this macro to 1 to make the library fetch pointers to Vulkan functions
4084 internally, like:
4085 
4086  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
4087 */
4088 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
4089  #define VMA_STATIC_VULKAN_FUNCTIONS 1
4090 #endif
4091 
4092 /*
4093 Define this macro to 1 to make the library fetch pointers to Vulkan functions
4094 internally, like:
4095 
4096  vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, vkAllocateMemory);
4097 */
4098 #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
4099  #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
4100  #if defined(VK_NO_PROTOTYPES)
4101  extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
4102  extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
4103  #endif
4104 #endif
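
/*
Example (an illustrative sketch, not part of the original file): a consumer
translation unit that links no Vulkan prototypes (e.g. when using a meta-loader
such as volk) and lets VMA fetch the functions dynamically; it assumes the
loader provides vkGetInstanceProcAddr/vkGetDeviceProcAddr.

    #define VMA_STATIC_VULKAN_FUNCTIONS 0
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/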

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation
of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
The library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif

static void* vma_aligned_alloc(size_t alignment, size_t size)
{
#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    // MAC_OS_X_VERSION_10_16), even though the function is marked
    // available for 10.15. That's why the preprocessor checks for 10.16 but
    // the __builtin_available checks for 10.15.
    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
    if (__builtin_available(macOS 10.15, iOS 13, *))
        return aligned_alloc(alignment, size);
#endif
#endif
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#elif defined(_WIN32)
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return _aligned_malloc(size, alignment);
}
#else
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return aligned_alloc(alignment, size);
}
#endif

#if defined(_WIN32)
static void vma_aligned_free(void* ptr)
{
    _aligned_free(ptr);
}
#else
static void vma_aligned_free(void* VMA_NULLABLE ptr)
{
    free(ptr);
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures, e.g. operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
#endif

#ifndef VMA_SYSTEM_ALIGNED_FREE
    // VMA_SYSTEM_FREE is the old name, but it might have been defined by the user.
    #if defined(VMA_SYSTEM_FREE)
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
    #else
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_MIN_ALIGNMENT
    #ifdef VMA_DEBUG_ALIGNMENT // Old name
        #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
    #else
        #define VMA_MIN_ALIGNMENT (1)
    #endif
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
    /*
    Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
    and return an error instead of leaving it up to the Vulkan implementation what to do in such cases.
    */
    #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns the number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Aligns the given value up to the nearest multiple of alignment. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return (val + alignment - 1) & ~(alignment - 1);
}
// Aligns the given value down to the nearest multiple of alignment. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return val & ~(alignment - 1);
}

// Division with mathematical rounding to the nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

// Returns the smallest power of 2 greater than or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns the largest power of 2 less than or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
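
/*
Worked example (illustrative, not part of the original file), with
pageSize = 0x10000, a typical bufferImageGranularity:

    // A occupies [0x0FF00, 0x0FF7F] -> last page 0x00000; B starts at 0x0FF80 -> page 0x00000.
    VmaBlocksOnSamePage(0x0FF00, 0x80, 0x0FF80, 0x10000);  // true
    // A occupies [0x0FF00, 0x0FFFF] -> last page 0x00000; B starts at 0x10000 -> page 0x10000.
    VmaBlocksOnSamePage(0x0FF00, 0x100, 0x10000, 0x10000); // false
*/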

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned iterator points to the found element, if it is present in the
collection, or to the place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}
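
/*
Example (an illustrative sketch, not part of the original file): prepending an
extension struct to a pNext chain, assuming Vulkan 1.1 / VK_KHR_dedicated_allocation
types are available.

    VkMemoryRequirements2 memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 };
    VkMemoryDedicatedRequirements dedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS };
    VmaPnextChainPushFront(&memReq2, &dedicatedReq);
    // memReq2.pNext now points to dedicatedReq.
*/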

// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    void* result = VMA_NULL;
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        result = (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
    return result;
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_ALIGNED_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}
4996 
4997 // STL-compatible allocator.
4998 template<typename T>
4999 class VmaStlAllocator
5000 {
5001 public:
5002  const VkAllocationCallbacks* const m_pCallbacks;
5003  typedef T value_type;
5004 
5005  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
5006  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
5007 
5008  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
5009  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
5010 
5011  template<typename U>
5012  bool operator==(const VmaStlAllocator<U>& rhs) const
5013  {
5014  return m_pCallbacks == rhs.m_pCallbacks;
5015  }
5016  template<typename U>
5017  bool operator!=(const VmaStlAllocator<U>& rhs) const
5018  {
5019  return m_pCallbacks != rhs.m_pCallbacks;
5020  }
5021 
5022  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
5023  VmaStlAllocator(const VmaStlAllocator&) = default;
5024 };
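/*
Usage sketch: VmaStlAllocator adapts VkAllocationCallbacks to the standard
allocator interface, so the same callbacks can back VmaVector below (or, in
principle, an STL container). allocate/deallocate manage raw memory only;
constructing and destroying elements is the container's job.

\code
const VkAllocationCallbacks* allocs = VMA_NULL; // or user-provided callbacks
VmaStlAllocator<int> intAlloc(allocs);
int* p = intAlloc.allocate(16); // room for 16 ints, uninitialized
intAlloc.deallocate(p, 16);
\endcode
*/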
5025 
5026 #if VMA_USE_STL_VECTOR
5027 
5028 #define VmaVector std::vector
5029 
5030 template<typename T, typename allocatorT>
5031 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
5032 {
5033  vec.insert(vec.begin() + index, item);
5034 }
5035 
5036 template<typename T, typename allocatorT>
5037 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
5038 {
5039  vec.erase(vec.begin() + index);
5040 }
5041 
5042 #else // #if VMA_USE_STL_VECTOR
5043 
5044 /* Class with interface compatible with subset of std::vector.
5045 T must be POD because constructors and destructors are not called and memcpy is
5046 used for these objects. */
5047 template<typename T, typename AllocatorT>
5048 class VmaVector
5049 {
5050 public:
5051  typedef T value_type;
5052 
5053  VmaVector(const AllocatorT& allocator) :
5054  m_Allocator(allocator),
5055  m_pArray(VMA_NULL),
5056  m_Count(0),
5057  m_Capacity(0)
5058  {
5059  }
5060 
5061  VmaVector(size_t count, const AllocatorT& allocator) :
5062  m_Allocator(allocator),
5063  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
5064  m_Count(count),
5065  m_Capacity(count)
5066  {
5067  }
5068 
5069  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
5070  // value is unused.
5071  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
5072  : VmaVector(count, allocator) {}
5073 
5074  VmaVector(const VmaVector<T, AllocatorT>& src) :
5075  m_Allocator(src.m_Allocator),
5076  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
5077  m_Count(src.m_Count),
5078  m_Capacity(src.m_Count)
5079  {
5080  if(m_Count != 0)
5081  {
5082  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
5083  }
5084  }
5085 
5086  ~VmaVector()
5087  {
5088  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5089  }
5090 
5091  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
5092  {
5093  if(&rhs != this)
5094  {
5095  resize(rhs.m_Count);
5096  if(m_Count != 0)
5097  {
5098  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
5099  }
5100  }
5101  return *this;
5102  }
5103 
5104  bool empty() const { return m_Count == 0; }
5105  size_t size() const { return m_Count; }
5106  T* data() { return m_pArray; }
5107  const T* data() const { return m_pArray; }
5108 
5109  T& operator[](size_t index)
5110  {
5111  VMA_HEAVY_ASSERT(index < m_Count);
5112  return m_pArray[index];
5113  }
5114  const T& operator[](size_t index) const
5115  {
5116  VMA_HEAVY_ASSERT(index < m_Count);
5117  return m_pArray[index];
5118  }
5119 
5120  T& front()
5121  {
5122  VMA_HEAVY_ASSERT(m_Count > 0);
5123  return m_pArray[0];
5124  }
5125  const T& front() const
5126  {
5127  VMA_HEAVY_ASSERT(m_Count > 0);
5128  return m_pArray[0];
5129  }
5130  T& back()
5131  {
5132  VMA_HEAVY_ASSERT(m_Count > 0);
5133  return m_pArray[m_Count - 1];
5134  }
5135  const T& back() const
5136  {
5137  VMA_HEAVY_ASSERT(m_Count > 0);
5138  return m_pArray[m_Count - 1];
5139  }
5140 
5141  void reserve(size_t newCapacity, bool freeMemory = false)
5142  {
5143  newCapacity = VMA_MAX(newCapacity, m_Count);
5144 
5145  if((newCapacity < m_Capacity) && !freeMemory)
5146  {
5147  newCapacity = m_Capacity;
5148  }
5149 
5150  if(newCapacity != m_Capacity)
5151  {
5152  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5153  if(m_Count != 0)
5154  {
5155  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5156  }
5157  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5158  m_Capacity = newCapacity;
5159  m_pArray = newArray;
5160  }
5161  }
5162 
5163  void resize(size_t newCount)
5164  {
5165  size_t newCapacity = m_Capacity;
5166  if(newCount > m_Capacity)
5167  {
5168  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
5169  }
5170 
5171  if(newCapacity != m_Capacity)
5172  {
5173  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5174  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
5175  if(elementsToCopy != 0)
5176  {
5177  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
5178  }
5179  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5180  m_Capacity = newCapacity;
5181  m_pArray = newArray;
5182  }
5183 
5184  m_Count = newCount;
5185  }
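 // Growth example: starting from an empty vector, repeated push_back() calls
 // drive the formula above - max(newCount, max(capacity * 3 / 2, 8)) - through
 // the capacity sequence 8, 12, 18, 27, 40, ..., so reallocations (and the
 // memcpy above) happen only O(log n) times for n elements.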
5186 
5187  void clear()
5188  {
5189  resize(0);
5190  }
5191 
5192  void shrink_to_fit()
5193  {
5194  if(m_Capacity > m_Count)
5195  {
5196  T* newArray = VMA_NULL;
5197  if(m_Count > 0)
5198  {
5199  newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
5200  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5201  }
5202  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5203  m_Capacity = m_Count;
5204  m_pArray = newArray;
5205  }
5206  }
5207 
5208  void insert(size_t index, const T& src)
5209  {
5210  VMA_HEAVY_ASSERT(index <= m_Count);
5211  const size_t oldCount = size();
5212  resize(oldCount + 1);
5213  if(index < oldCount)
5214  {
5215  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
5216  }
5217  m_pArray[index] = src;
5218  }
5219 
5220  void remove(size_t index)
5221  {
5222  VMA_HEAVY_ASSERT(index < m_Count);
5223  const size_t oldCount = size();
5224  if(index < oldCount - 1)
5225  {
5226  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
5227  }
5228  resize(oldCount - 1);
5229  }
5230 
5231  void push_back(const T& src)
5232  {
5233  const size_t newIndex = size();
5234  resize(newIndex + 1);
5235  m_pArray[newIndex] = src;
5236  }
5237 
5238  void pop_back()
5239  {
5240  VMA_HEAVY_ASSERT(m_Count > 0);
5241  resize(size() - 1);
5242  }
5243 
5244  void push_front(const T& src)
5245  {
5246  insert(0, src);
5247  }
5248 
5249  void pop_front()
5250  {
5251  VMA_HEAVY_ASSERT(m_Count > 0);
5252  remove(0);
5253  }
5254 
5255  typedef T* iterator;
5256  typedef const T* const_iterator;
5257 
5258  iterator begin() { return m_pArray; }
5259  iterator end() { return m_pArray + m_Count; }
5260  const_iterator cbegin() const { return m_pArray; }
5261  const_iterator cend() const { return m_pArray + m_Count; }
5262  const_iterator begin() const { return cbegin(); }
5263  const_iterator end() const { return cend(); }
5264 
5265 private:
5266  AllocatorT m_Allocator;
5267  T* m_pArray;
5268  size_t m_Count;
5269  size_t m_Capacity;
5270 };
5271 
5272 template<typename T, typename allocatorT>
5273 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
5274 {
5275  vec.insert(index, item);
5276 }
5277 
5278 template<typename T, typename allocatorT>
5279 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
5280 {
5281  vec.remove(index);
5282 }
5283 
5284 #endif // #if VMA_USE_STL_VECTOR
5285 
5286 template<typename CmpLess, typename VectorT>
5287 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
5288 {
5289  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5290  vector.data(),
5291  vector.data() + vector.size(),
5292  value,
5293  CmpLess()) - vector.data();
5294  VmaVectorInsert(vector, indexToInsert, value);
5295  return indexToInsert;
5296 }
5297 
5298 template<typename CmpLess, typename VectorT>
5299 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
5300 {
5301  CmpLess comparator;
5302  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
5303  vector.begin(),
5304  vector.end(),
5305  value,
5306  comparator);
5307  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5308  {
5309  size_t indexToRemove = it - vector.begin();
5310  VmaVectorRemove(vector, indexToRemove);
5311  return true;
5312  }
5313  return false;
5314 }
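/*
Usage sketch (IntLess is a hypothetical comparator): the two helpers above keep
a vector sorted. VmaVectorInsertSorted binary-searches for the insertion point
via VmaBinaryFindFirstNotLess; VmaVectorRemoveSorted removes an element only if
an equivalent one exists (neither less nor greater under the comparator).

\code
struct IntLess { bool operator()(int a, int b) const { return a < b; } };
typedef VmaStlAllocator<int> IntAlloc;
VmaVector<int, IntAlloc> v(IntAlloc(VMA_NULL));
VmaVectorInsertSorted<IntLess>(v, 5);
VmaVectorInsertSorted<IntLess>(v, 2);
VmaVectorInsertSorted<IntLess>(v, 7); // v == {2, 5, 7}
bool removed = VmaVectorRemoveSorted<IntLess>(v, 5); // true; v == {2, 7}
\endcode
*/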
5315 
5316 ////////////////////////////////////////////////////////////////////////////////
5317 // class VmaSmallVector
5318 
5319 /*
5320 This is a vector (a variable-sized array), optimized for the case when the array is small.
5321 
5322 It contains some number of elements in-place, which allows it to avoid heap allocation
5323 when the actual number of elements is below that threshold. This allows normal "small"
5324 cases to be fast without losing generality for large inputs.
5325 */
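/*
Behavior sketch: with N = 4, the first 4 elements live in m_StaticArray and no
heap allocation happens; pushing a 5th element migrates the contents into
m_DynamicArray (see resize() below), and shrinking back to <= N copies them
into the static storage again.

\code
typedef VmaStlAllocator<int> IntAlloc;
VmaSmallVector<int, IntAlloc, 4> sv(IntAlloc(VMA_NULL));
for(int i = 0; i < 4; ++i)
    sv.push_back(i); // in-place storage, no heap traffic
sv.push_back(4); // crosses the threshold: elements move to the dynamic array
\endcode
*/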
5326 
5327 template<typename T, typename AllocatorT, size_t N>
5328 class VmaSmallVector
5329 {
5330 public:
5331  typedef T value_type;
5332 
5333  VmaSmallVector(const AllocatorT& allocator) :
5334  m_Count(0),
5335  m_DynamicArray(allocator)
5336  {
5337  }
5338  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5339  m_Count(count),
5340  m_DynamicArray(count > N ? count : 0, allocator)
5341  {
5342  }
5343  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5344  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5345  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5346  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5347 
5348  bool empty() const { return m_Count == 0; }
5349  size_t size() const { return m_Count; }
5350  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5351  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5352 
5353  T& operator[](size_t index)
5354  {
5355  VMA_HEAVY_ASSERT(index < m_Count);
5356  return data()[index];
5357  }
5358  const T& operator[](size_t index) const
5359  {
5360  VMA_HEAVY_ASSERT(index < m_Count);
5361  return data()[index];
5362  }
5363 
5364  T& front()
5365  {
5366  VMA_HEAVY_ASSERT(m_Count > 0);
5367  return data()[0];
5368  }
5369  const T& front() const
5370  {
5371  VMA_HEAVY_ASSERT(m_Count > 0);
5372  return data()[0];
5373  }
5374  T& back()
5375  {
5376  VMA_HEAVY_ASSERT(m_Count > 0);
5377  return data()[m_Count - 1];
5378  }
5379  const T& back() const
5380  {
5381  VMA_HEAVY_ASSERT(m_Count > 0);
5382  return data()[m_Count - 1];
5383  }
5384 
5385  void resize(size_t newCount, bool freeMemory = false)
5386  {
5387  if(newCount > N && m_Count > N)
5388  {
5389  // Any direction, staying in m_DynamicArray
5390  m_DynamicArray.resize(newCount);
5391  if(freeMemory)
5392  {
5393  m_DynamicArray.shrink_to_fit();
5394  }
5395  }
5396  else if(newCount > N && m_Count <= N)
5397  {
5398  // Growing, moving from m_StaticArray to m_DynamicArray
5399  m_DynamicArray.resize(newCount);
5400  if(m_Count > 0)
5401  {
5402  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5403  }
5404  }
5405  else if(newCount <= N && m_Count > N)
5406  {
5407  // Shrinking, moving from m_DynamicArray to m_StaticArray
5408  if(newCount > 0)
5409  {
5410  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5411  }
5412  m_DynamicArray.resize(0);
5413  if(freeMemory)
5414  {
5415  m_DynamicArray.shrink_to_fit();
5416  }
5417  }
5418  else
5419  {
5420  // Any direction, staying in m_StaticArray - nothing to do here
5421  }
5422  m_Count = newCount;
5423  }
5424 
5425  void clear(bool freeMemory = false)
5426  {
5427  m_DynamicArray.clear();
5428  if(freeMemory)
5429  {
5430  m_DynamicArray.shrink_to_fit();
5431  }
5432  m_Count = 0;
5433  }
5434 
5435  void insert(size_t index, const T& src)
5436  {
5437  VMA_HEAVY_ASSERT(index <= m_Count);
5438  const size_t oldCount = size();
5439  resize(oldCount + 1);
5440  T* const dataPtr = data();
5441  if(index < oldCount)
5442  {
5443  // Suboptimal when resize() has just moved the data from m_StaticArray to m_DynamicArray: that copy and this shift could be fused into a single memcpy.
5444  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5445  }
5446  dataPtr[index] = src;
5447  }
5448 
5449  void remove(size_t index)
5450  {
5451  VMA_HEAVY_ASSERT(index < m_Count);
5452  const size_t oldCount = size();
5453  if(index < oldCount - 1)
5454  {
5455  // Suboptimal when the subsequent resize() moves the data from m_DynamicArray back to m_StaticArray: this shift and that copy could be fused into a single memcpy.
5456  T* const dataPtr = data();
5457  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5458  }
5459  resize(oldCount - 1);
5460  }
5461 
5462  void push_back(const T& src)
5463  {
5464  const size_t newIndex = size();
5465  resize(newIndex + 1);
5466  data()[newIndex] = src;
5467  }
5468 
5469  void pop_back()
5470  {
5471  VMA_HEAVY_ASSERT(m_Count > 0);
5472  resize(size() - 1);
5473  }
5474 
5475  void push_front(const T& src)
5476  {
5477  insert(0, src);
5478  }
5479 
5480  void pop_front()
5481  {
5482  VMA_HEAVY_ASSERT(m_Count > 0);
5483  remove(0);
5484  }
5485 
5486  typedef T* iterator;
5487 
5488  iterator begin() { return data(); }
5489  iterator end() { return data() + m_Count; }
5490 
5491 private:
5492  size_t m_Count;
5493  T m_StaticArray[N]; // Used when m_Count <= N
5494  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
5495 };
5496 
5497 ////////////////////////////////////////////////////////////////////////////////
5498 // class VmaPoolAllocator
5499 
5500 /*
5501 Allocator for objects of type T using a list of arrays (pools) to speed up
5502 allocation. The number of elements that can be allocated is unbounded because
5503 the allocator can create multiple blocks as needed.
5504 */
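/*
Usage sketch (Node is a hypothetical type): Alloc() reuses a free slot from an
existing block when one is available, otherwise it creates a new block sized
3/2 of the previous one (e.g. 32, 48, 72, ... for firstBlockCapacity = 32).
Free() finds the owning block by address range and returns the slot to that
block's free list.

\code
struct Node { int value; Node(int v) : value(v) {} };
VmaPoolAllocator<Node> pool(VMA_NULL, 32); // first block holds 32 items
Node* n = pool.Alloc(7); // arguments are forwarded to Node's constructor
pool.Free(n);
\endcode
*/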
5505 template<typename T>
5506 class VmaPoolAllocator
5507 {
5508  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5509 public:
5510  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5511  ~VmaPoolAllocator();
5512  template<typename... Types> T* Alloc(Types... args);
5513  void Free(T* ptr);
5514 
5515 private:
5516  union Item
5517  {
5518  uint32_t NextFreeIndex;
5519  alignas(T) char Value[sizeof(T)];
5520  };
5521 
5522  struct ItemBlock
5523  {
5524  Item* pItems;
5525  uint32_t Capacity;
5526  uint32_t FirstFreeIndex;
5527  };
5528 
5529  const VkAllocationCallbacks* m_pAllocationCallbacks;
5530  const uint32_t m_FirstBlockCapacity;
5531  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5532 
5533  ItemBlock& CreateNewBlock();
5534 };
5535 
5536 template<typename T>
5537 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5538  m_pAllocationCallbacks(pAllocationCallbacks),
5539  m_FirstBlockCapacity(firstBlockCapacity),
5540  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5541 {
5542  VMA_ASSERT(m_FirstBlockCapacity > 1);
5543 }
5544 
5545 template<typename T>
5546 VmaPoolAllocator<T>::~VmaPoolAllocator()
5547 {
5548  for(size_t i = m_ItemBlocks.size(); i--; )
5549  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5550  m_ItemBlocks.clear();
5551 }
5552 
5553 template<typename T>
5554 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
5555 {
5556  for(size_t i = m_ItemBlocks.size(); i--; )
5557  {
5558  ItemBlock& block = m_ItemBlocks[i];
5559  // This block has some free items: use the first one.
5560  if(block.FirstFreeIndex != UINT32_MAX)
5561  {
5562  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5563  block.FirstFreeIndex = pItem->NextFreeIndex;
5564  T* result = (T*)&pItem->Value;
5565  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5566  return result;
5567  }
5568  }
5569 
5570  // No block has a free item: create a new block and use it.
5571  ItemBlock& newBlock = CreateNewBlock();
5572  Item* const pItem = &newBlock.pItems[0];
5573  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5574  T* result = (T*)&pItem->Value;
5575  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5576  return result;
5577 }
5578 
5579 template<typename T>
5580 void VmaPoolAllocator<T>::Free(T* ptr)
5581 {
5582  // Search all memory blocks to find ptr.
5583  for(size_t i = m_ItemBlocks.size(); i--; )
5584  {
5585  ItemBlock& block = m_ItemBlocks[i];
5586 
5587  // Casting to union.
5588  Item* pItemPtr;
5589  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5590 
5591  // Check if pItemPtr is in address range of this block.
5592  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5593  {
5594  ptr->~T(); // Explicit destructor call.
5595  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5596  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5597  block.FirstFreeIndex = index;
5598  return;
5599  }
5600  }
5601  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5602 }
5603 
5604 template<typename T>
5605 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5606 {
5607  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5608  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5609 
5610  const ItemBlock newBlock = {
5611  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5612  newBlockCapacity,
5613  0 };
5614 
5615  m_ItemBlocks.push_back(newBlock);
5616 
5617  // Set up the singly-linked list of all free items in this block.
5618  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5619  newBlock.pItems[i].NextFreeIndex = i + 1;
5620  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5621  return m_ItemBlocks.back();
5622 }
5623 
5624 ////////////////////////////////////////////////////////////////////////////////
5625 // class VmaRawList, VmaList
5626 
5627 #if VMA_USE_STL_LIST
5628 
5629 #define VmaList std::list
5630 
5631 #else // #if VMA_USE_STL_LIST
5632 
5633 template<typename T>
5634 struct VmaListItem
5635 {
5636  VmaListItem* pPrev;
5637  VmaListItem* pNext;
5638  T Value;
5639 };
5640 
5641 // Doubly linked list.
5642 template<typename T>
5643 class VmaRawList
5644 {
5645  VMA_CLASS_NO_COPY(VmaRawList)
5646 public:
5647  typedef VmaListItem<T> ItemType;
5648 
5649  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5650  ~VmaRawList();
5651  void Clear();
5652 
5653  size_t GetCount() const { return m_Count; }
5654  bool IsEmpty() const { return m_Count == 0; }
5655 
5656  ItemType* Front() { return m_pFront; }
5657  const ItemType* Front() const { return m_pFront; }
5658  ItemType* Back() { return m_pBack; }
5659  const ItemType* Back() const { return m_pBack; }
5660 
5661  ItemType* PushBack();
5662  ItemType* PushFront();
5663  ItemType* PushBack(const T& value);
5664  ItemType* PushFront(const T& value);
5665  void PopBack();
5666  void PopFront();
5667 
5668  // Item can be null - it means PushBack.
5669  ItemType* InsertBefore(ItemType* pItem);
5670  // Item can be null - it means PushFront.
5671  ItemType* InsertAfter(ItemType* pItem);
5672 
5673  ItemType* InsertBefore(ItemType* pItem, const T& value);
5674  ItemType* InsertAfter(ItemType* pItem, const T& value);
5675 
5676  void Remove(ItemType* pItem);
5677 
5678 private:
5679  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5680  VmaPoolAllocator<ItemType> m_ItemAllocator;
5681  ItemType* m_pFront;
5682  ItemType* m_pBack;
5683  size_t m_Count;
5684 };
5685 
5686 template<typename T>
5687 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5688  m_pAllocationCallbacks(pAllocationCallbacks),
5689  m_ItemAllocator(pAllocationCallbacks, 128),
5690  m_pFront(VMA_NULL),
5691  m_pBack(VMA_NULL),
5692  m_Count(0)
5693 {
5694 }
5695 
5696 template<typename T>
5697 VmaRawList<T>::~VmaRawList()
5698 {
5699  // Intentionally not calling Clear, because that would waste computation
5700  // returning all items to m_ItemAllocator as free.
5701 }
5702 
5703 template<typename T>
5704 void VmaRawList<T>::Clear()
5705 {
5706  if(IsEmpty() == false)
5707  {
5708  ItemType* pItem = m_pBack;
5709  while(pItem != VMA_NULL)
5710  {
5711  ItemType* const pPrevItem = pItem->pPrev;
5712  m_ItemAllocator.Free(pItem);
5713  pItem = pPrevItem;
5714  }
5715  m_pFront = VMA_NULL;
5716  m_pBack = VMA_NULL;
5717  m_Count = 0;
5718  }
5719 }
5720 
5721 template<typename T>
5722 VmaListItem<T>* VmaRawList<T>::PushBack()
5723 {
5724  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5725  pNewItem->pNext = VMA_NULL;
5726  if(IsEmpty())
5727  {
5728  pNewItem->pPrev = VMA_NULL;
5729  m_pFront = pNewItem;
5730  m_pBack = pNewItem;
5731  m_Count = 1;
5732  }
5733  else
5734  {
5735  pNewItem->pPrev = m_pBack;
5736  m_pBack->pNext = pNewItem;
5737  m_pBack = pNewItem;
5738  ++m_Count;
5739  }
5740  return pNewItem;
5741 }
5742 
5743 template<typename T>
5744 VmaListItem<T>* VmaRawList<T>::PushFront()
5745 {
5746  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5747  pNewItem->pPrev = VMA_NULL;
5748  if(IsEmpty())
5749  {
5750  pNewItem->pNext = VMA_NULL;
5751  m_pFront = pNewItem;
5752  m_pBack = pNewItem;
5753  m_Count = 1;
5754  }
5755  else
5756  {
5757  pNewItem->pNext = m_pFront;
5758  m_pFront->pPrev = pNewItem;
5759  m_pFront = pNewItem;
5760  ++m_Count;
5761  }
5762  return pNewItem;
5763 }
5764 
5765 template<typename T>
5766 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5767 {
5768  ItemType* const pNewItem = PushBack();
5769  pNewItem->Value = value;
5770  return pNewItem;
5771 }
5772 
5773 template<typename T>
5774 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5775 {
5776  ItemType* const pNewItem = PushFront();
5777  pNewItem->Value = value;
5778  return pNewItem;
5779 }
5780 
5781 template<typename T>
5782 void VmaRawList<T>::PopBack()
5783 {
5784  VMA_HEAVY_ASSERT(m_Count > 0);
5785  ItemType* const pBackItem = m_pBack;
5786  ItemType* const pPrevItem = pBackItem->pPrev;
5787  if(pPrevItem != VMA_NULL)
5788  {
5789  pPrevItem->pNext = VMA_NULL;
5790  }
5791  m_pBack = pPrevItem;
5792  m_ItemAllocator.Free(pBackItem);
5793  --m_Count;
5794 }
5795 
5796 template<typename T>
5797 void VmaRawList<T>::PopFront()
5798 {
5799  VMA_HEAVY_ASSERT(m_Count > 0);
5800  ItemType* const pFrontItem = m_pFront;
5801  ItemType* const pNextItem = pFrontItem->pNext;
5802  if(pNextItem != VMA_NULL)
5803  {
5804  pNextItem->pPrev = VMA_NULL;
5805  }
5806  m_pFront = pNextItem;
5807  m_ItemAllocator.Free(pFrontItem);
5808  --m_Count;
5809 }
5810 
5811 template<typename T>
5812 void VmaRawList<T>::Remove(ItemType* pItem)
5813 {
5814  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5815  VMA_HEAVY_ASSERT(m_Count > 0);
5816 
5817  if(pItem->pPrev != VMA_NULL)
5818  {
5819  pItem->pPrev->pNext = pItem->pNext;
5820  }
5821  else
5822  {
5823  VMA_HEAVY_ASSERT(m_pFront == pItem);
5824  m_pFront = pItem->pNext;
5825  }
5826 
5827  if(pItem->pNext != VMA_NULL)
5828  {
5829  pItem->pNext->pPrev = pItem->pPrev;
5830  }
5831  else
5832  {
5833  VMA_HEAVY_ASSERT(m_pBack == pItem);
5834  m_pBack = pItem->pPrev;
5835  }
5836 
5837  m_ItemAllocator.Free(pItem);
5838  --m_Count;
5839 }
5840 
5841 template<typename T>
5842 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5843 {
5844  if(pItem != VMA_NULL)
5845  {
5846  ItemType* const prevItem = pItem->pPrev;
5847  ItemType* const newItem = m_ItemAllocator.Alloc();
5848  newItem->pPrev = prevItem;
5849  newItem->pNext = pItem;
5850  pItem->pPrev = newItem;
5851  if(prevItem != VMA_NULL)
5852  {
5853  prevItem->pNext = newItem;
5854  }
5855  else
5856  {
5857  VMA_HEAVY_ASSERT(m_pFront == pItem);
5858  m_pFront = newItem;
5859  }
5860  ++m_Count;
5861  return newItem;
5862  }
5863  else
5864  return PushBack();
5865 }
5866 
5867 template<typename T>
5868 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5869 {
5870  if(pItem != VMA_NULL)
5871  {
5872  ItemType* const nextItem = pItem->pNext;
5873  ItemType* const newItem = m_ItemAllocator.Alloc();
5874  newItem->pNext = nextItem;
5875  newItem->pPrev = pItem;
5876  pItem->pNext = newItem;
5877  if(nextItem != VMA_NULL)
5878  {
5879  nextItem->pPrev = newItem;
5880  }
5881  else
5882  {
5883  VMA_HEAVY_ASSERT(m_pBack == pItem);
5884  m_pBack = newItem;
5885  }
5886  ++m_Count;
5887  return newItem;
5888  }
5889  else
5890  return PushFront();
5891 }
5892 
5893 template<typename T>
5894 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5895 {
5896  ItemType* const newItem = InsertBefore(pItem);
5897  newItem->Value = value;
5898  return newItem;
5899 }
5900 
5901 template<typename T>
5902 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5903 {
5904  ItemType* const newItem = InsertAfter(pItem);
5905  newItem->Value = value;
5906  return newItem;
5907 }
5908 
5909 template<typename T, typename AllocatorT>
5910 class VmaList
5911 {
5912  VMA_CLASS_NO_COPY(VmaList)
5913 public:
5914  class iterator
5915  {
5916  public:
5917  iterator() :
5918  m_pList(VMA_NULL),
5919  m_pItem(VMA_NULL)
5920  {
5921  }
5922 
5923  T& operator*() const
5924  {
5925  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5926  return m_pItem->Value;
5927  }
5928  T* operator->() const
5929  {
5930  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5931  return &m_pItem->Value;
5932  }
5933 
5934  iterator& operator++()
5935  {
5936  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5937  m_pItem = m_pItem->pNext;
5938  return *this;
5939  }
5940  iterator& operator--()
5941  {
5942  if(m_pItem != VMA_NULL)
5943  {
5944  m_pItem = m_pItem->pPrev;
5945  }
5946  else
5947  {
5948  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5949  m_pItem = m_pList->Back();
5950  }
5951  return *this;
5952  }
5953 
5954  iterator operator++(int)
5955  {
5956  iterator result = *this;
5957  ++*this;
5958  return result;
5959  }
5960  iterator operator--(int)
5961  {
5962  iterator result = *this;
5963  --*this;
5964  return result;
5965  }
5966 
5967  bool operator==(const iterator& rhs) const
5968  {
5969  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5970  return m_pItem == rhs.m_pItem;
5971  }
5972  bool operator!=(const iterator& rhs) const
5973  {
5974  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5975  return m_pItem != rhs.m_pItem;
5976  }
5977 
5978  private:
5979  VmaRawList<T>* m_pList;
5980  VmaListItem<T>* m_pItem;
5981 
5982  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5983  m_pList(pList),
5984  m_pItem(pItem)
5985  {
5986  }
5987 
5988  friend class VmaList<T, AllocatorT>;
5989  };
5990 
5991  class const_iterator
5992  {
5993  public:
5994  const_iterator() :
5995  m_pList(VMA_NULL),
5996  m_pItem(VMA_NULL)
5997  {
5998  }
5999 
6000  const_iterator(const iterator& src) :
6001  m_pList(src.m_pList),
6002  m_pItem(src.m_pItem)
6003  {
6004  }
6005 
6006  const T& operator*() const
6007  {
6008  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
6009  return m_pItem->Value;
6010  }
6011  const T* operator->() const
6012  {
6013  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
6014  return &m_pItem->Value;
6015  }
6016 
6017  const_iterator& operator++()
6018  {
6019  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
6020  m_pItem = m_pItem->pNext;
6021  return *this;
6022  }
6023  const_iterator& operator--()
6024  {
6025  if(m_pItem != VMA_NULL)
6026  {
6027  m_pItem = m_pItem->pPrev;
6028  }
6029  else
6030  {
6031  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
6032  m_pItem = m_pList->Back();
6033  }
6034  return *this;
6035  }
6036 
6037  const_iterator operator++(int)
6038  {
6039  const_iterator result = *this;
6040  ++*this;
6041  return result;
6042  }
6043  const_iterator operator--(int)
6044  {
6045  const_iterator result = *this;
6046  --*this;
6047  return result;
6048  }
6049 
6050  bool operator==(const const_iterator& rhs) const
6051  {
6052  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
6053  return m_pItem == rhs.m_pItem;
6054  }
6055  bool operator!=(const const_iterator& rhs) const
6056  {
6057  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
6058  return m_pItem != rhs.m_pItem;
6059  }
6060 
6061  private:
6062  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
6063  m_pList(pList),
6064  m_pItem(pItem)
6065  {
6066  }
6067 
6068  const VmaRawList<T>* m_pList;
6069  const VmaListItem<T>* m_pItem;
6070 
6071  friend class VmaList<T, AllocatorT>;
6072  };
6073 
6074  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
6075 
6076  bool empty() const { return m_RawList.IsEmpty(); }
6077  size_t size() const { return m_RawList.GetCount(); }
6078 
6079  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
6080  iterator end() { return iterator(&m_RawList, VMA_NULL); }
6081 
6082  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
6083  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
6084 
6085  const_iterator begin() const { return cbegin(); }
6086  const_iterator end() const { return cend(); }
6087 
6088  void clear() { m_RawList.Clear(); }
6089  void push_back(const T& value) { m_RawList.PushBack(value); }
6090  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
6091  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
6092 
6093 private:
6094  VmaRawList<T> m_RawList;
6095 };
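/*
Usage sketch: VmaList mirrors a subset of std::list on top of VmaRawList. Note
that end() is represented by a null item pointer, which is why operator-- on an
end() iterator steps to Back() instead of dereferencing.

\code
typedef VmaStlAllocator<int> IntAlloc;
VmaList<int, IntAlloc> list(IntAlloc(VMA_NULL));
list.push_back(1);
list.push_back(2);
for(VmaList<int, IntAlloc>::iterator it = list.begin(); it != list.end(); ++it)
    *it += 10; // list now holds 11, 12
\endcode
*/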
6096 
6097 #endif // #if VMA_USE_STL_LIST
6098 
6099 ////////////////////////////////////////////////////////////////////////////////
6100 // class VmaIntrusiveLinkedList
6101 
6102 /*
6103 Expected interface of ItemTypeTraits:
6104 struct MyItemTypeTraits
6105 {
6106  typedef MyItem ItemType;
6107  static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
6108  static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
6109  static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
6110  static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
6111 };
6112 */
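/*
Usage sketch: the list never owns or allocates items; it only links objects
through pointers that the items themselves provide via the traits. An item must
have null prev/next pointers before being pushed, and the list must be emptied
(e.g. with RemoveAll()) before destruction.

\code
struct MyItem
{
    int payload = 0;
    MyItem* myPrevPtr = VMA_NULL;
    MyItem* myNextPtr = VMA_NULL;
};
struct MyItemTypeTraits // exactly the shape described above
{
    typedef MyItem ItemType;
    static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
    static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
    static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
    static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
};

VmaIntrusiveLinkedList<MyItemTypeTraits> list;
MyItem a, b;
list.PushBack(&a);
list.PushBack(&b); // a <-> b, linked through the items' own pointers
list.RemoveAll();  // unlinks both; the items' storage is untouched
\endcode
*/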
6113 template<typename ItemTypeTraits>
6114 class VmaIntrusiveLinkedList
6115 {
6116 public:
6117  typedef typename ItemTypeTraits::ItemType ItemType;
6118  static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
6119  static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
6120  // Movable, not copyable.
6121  VmaIntrusiveLinkedList() { }
6122  VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
6123  VmaIntrusiveLinkedList(VmaIntrusiveLinkedList<ItemTypeTraits>&& src) :
6124  m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
6125  {
6126  src.m_Front = src.m_Back = VMA_NULL;
6127  src.m_Count = 0;
6128  }
6129  ~VmaIntrusiveLinkedList()
6130  {
6131  VMA_HEAVY_ASSERT(IsEmpty());
6132  }
6133  VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
6134  VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(VmaIntrusiveLinkedList<ItemTypeTraits>&& src)
6135  {
6136  if(&src != this)
6137  {
6138  VMA_HEAVY_ASSERT(IsEmpty());
6139  m_Front = src.m_Front;
6140  m_Back = src.m_Back;
6141  m_Count = src.m_Count;
6142  src.m_Front = src.m_Back = VMA_NULL;
6143  src.m_Count = 0;
6144  }
6145  return *this;
6146  }
6147  void RemoveAll()
6148  {
6149  if(!IsEmpty())
6150  {
6151  ItemType* item = m_Back;
6152  while(item != VMA_NULL)
6153  {
6154  ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
6155  ItemTypeTraits::AccessPrev(item) = VMA_NULL;
6156  ItemTypeTraits::AccessNext(item) = VMA_NULL;
6157  item = prevItem;
6158  }
6159  m_Front = VMA_NULL;
6160  m_Back = VMA_NULL;
6161  m_Count = 0;
6162  }
6163  }
6164  size_t GetCount() const { return m_Count; }
6165  bool IsEmpty() const { return m_Count == 0; }
6166  ItemType* Front() { return m_Front; }
6167  const ItemType* Front() const { return m_Front; }
6168  ItemType* Back() { return m_Back; }
6169  const ItemType* Back() const { return m_Back; }
6170  void PushBack(ItemType* item)
6171  {
6172  VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
6173  if(IsEmpty())
6174  {
6175  m_Front = item;
6176  m_Back = item;
6177  m_Count = 1;
6178  }
6179  else
6180  {
6181  ItemTypeTraits::AccessPrev(item) = m_Back;
6182  ItemTypeTraits::AccessNext(m_Back) = item;
6183  m_Back = item;
6184  ++m_Count;
6185  }
6186  }
6187  void PushFront(ItemType* item)
6188  {
6189  VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
6190  if(IsEmpty())
6191  {
6192  m_Front = item;
6193  m_Back = item;
6194  m_Count = 1;
6195  }
6196  else
6197  {
6198  ItemTypeTraits::AccessNext(item) = m_Front;
6199  ItemTypeTraits::AccessPrev(m_Front) = item;
6200  m_Front = item;
6201  ++m_Count;
6202  }
6203  }
6204  ItemType* PopBack()
6205  {
6206  VMA_HEAVY_ASSERT(m_Count > 0);
6207  ItemType* const backItem = m_Back;
6208  ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
6209  if(prevItem != VMA_NULL)
6210  {
6211  ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
6212  }
6213  m_Back = prevItem;
6214  --m_Count;
6215  ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
6216  ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
6217  return backItem;
6218  }
6219  ItemType* PopFront()
6220  {
6221  VMA_HEAVY_ASSERT(m_Count > 0);
6222  ItemType* const frontItem = m_Front;
6223  ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
6224  if(nextItem != VMA_NULL)
6225  {
6226  ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
6227  }
6228  m_Front = nextItem;
6229  --m_Count;
6230  ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
6231  ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
6232  return frontItem;
6233  }
6234 
6235  // existingItem can be null - it means PushBack.
6236  void InsertBefore(ItemType* existingItem, ItemType* newItem)
6237  {
6238  VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
6239  if(existingItem != VMA_NULL)
6240  {
6241  ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
6242  ItemTypeTraits::AccessPrev(newItem) = prevItem;
6243  ItemTypeTraits::AccessNext(newItem) = existingItem;
6244  ItemTypeTraits::AccessPrev(existingItem) = newItem;
6245  if(prevItem != VMA_NULL)
6246  {
6247  ItemTypeTraits::AccessNext(prevItem) = newItem;
6248  }
6249  else
6250  {
6251  VMA_HEAVY_ASSERT(m_Front == existingItem);
6252  m_Front = newItem;
6253  }
6254  ++m_Count;
6255  }
6256  else
6257  PushBack(newItem);
6258  }
6259  // existingItem can be null - it means PushFront.
6260  void InsertAfter(ItemType* existingItem, ItemType* newItem)
6261  {
6262  VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
6263  if(existingItem != VMA_NULL)
6264  {
6265  ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
6266  ItemTypeTraits::AccessNext(newItem) = nextItem;
6267  ItemTypeTraits::AccessPrev(newItem) = existingItem;
6268  ItemTypeTraits::AccessNext(existingItem) = newItem;
6269  if(nextItem != VMA_NULL)
6270  {
6271  ItemTypeTraits::AccessPrev(nextItem) = newItem;
6272  }
6273  else
6274  {
6275  VMA_HEAVY_ASSERT(m_Back == existingItem);
6276  m_Back = newItem;
6277  }
6278  ++m_Count;
6279  }
6280  else
6281  PushFront(newItem);
6282  }
6283  void Remove(ItemType* item)
6284  {
6285  VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
6286  if(ItemTypeTraits::GetPrev(item) != VMA_NULL)
6287  {
6288  ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
6289  }
6290  else
6291  {
6292  VMA_HEAVY_ASSERT(m_Front == item);
6293  m_Front = ItemTypeTraits::GetNext(item);
6294  }
6295 
6296  if(ItemTypeTraits::GetNext(item) != VMA_NULL)
6297  {
6298  ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
6299  }
6300  else
6301  {
6302  VMA_HEAVY_ASSERT(m_Back == item);
6303  m_Back = ItemTypeTraits::GetPrev(item);
6304  }
6305  ItemTypeTraits::AccessPrev(item) = VMA_NULL;
6306  ItemTypeTraits::AccessNext(item) = VMA_NULL;
6307  --m_Count;
6308  }
6309 private:
6310  ItemType* m_Front = VMA_NULL;
6311  ItemType* m_Back = VMA_NULL;
6312  size_t m_Count = 0;
6313 };
6314 
6315 ////////////////////////////////////////////////////////////////////////////////
6316 // class VmaMap
6317 
6318 // Unused in this version.
6319 #if 0
6320 
6321 #if VMA_USE_STL_UNORDERED_MAP
6322 
6323 #define VmaPair std::pair
6324 
6325 #define VMA_MAP_TYPE(KeyT, ValueT) \
6326  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
6327 
6328 #else // #if VMA_USE_STL_UNORDERED_MAP
6329 
6330 template<typename T1, typename T2>
6331 struct VmaPair
6332 {
6333  T1 first;
6334  T2 second;
6335 
6336  VmaPair() : first(), second() { }
6337  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
6338 };
6339 
6340 /* Class compatible with subset of interface of std::unordered_map.
6341 KeyT, ValueT must be POD because they will be stored in VmaVector.
6342 */
6343 template<typename KeyT, typename ValueT>
6344 class VmaMap
6345 {
6346 public:
6347  typedef VmaPair<KeyT, ValueT> PairType;
6348  typedef PairType* iterator;
6349 
6350  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
6351 
6352  iterator begin() { return m_Vector.begin(); }
6353  iterator end() { return m_Vector.end(); }
6354 
6355  void insert(const PairType& pair);
6356  iterator find(const KeyT& key);
6357  void erase(iterator it);
6358 
6359 private:
6360  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
6361 };
6362 
6363 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
6364 
6365 template<typename FirstT, typename SecondT>
6366 struct VmaPairFirstLess
6367 {
6368  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
6369  {
6370  return lhs.first < rhs.first;
6371  }
6372  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
6373  {
6374  return lhs.first < rhsFirst;
6375  }
6376 };
6377 
6378 template<typename KeyT, typename ValueT>
6379 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
6380 {
6381  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
6382  m_Vector.data(),
6383  m_Vector.data() + m_Vector.size(),
6384  pair,
6385  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
6386  VmaVectorInsert(m_Vector, indexToInsert, pair);
6387 }
6388 
6389 template<typename KeyT, typename ValueT>
6390 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
6391 {
6392  PairType* it = VmaBinaryFindFirstNotLess(
6393  m_Vector.data(),
6394  m_Vector.data() + m_Vector.size(),
6395  key,
6396  VmaPairFirstLess<KeyT, ValueT>());
6397  if((it != m_Vector.end()) && (it->first == key))
6398  {
6399  return it;
6400  }
6401  else
6402  {
6403  return m_Vector.end();
6404  }
6405 }
6406 
6407 template<typename KeyT, typename ValueT>
6408 void VmaMap<KeyT, ValueT>::erase(iterator it)
6409 {
6410  VmaVectorRemove(m_Vector, it - m_Vector.begin());
6411 }
6412 
6413 #endif // #if VMA_USE_STL_UNORDERED_MAP
6414 
6415 #endif // #if 0
6416 
6417 ////////////////////////////////////////////////////////////////////////////////
6418 
6419 class VmaDeviceMemoryBlock;
6420 
6421 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
6422 
6423 struct VmaAllocation_T
6424 {
6425 private:
6426  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
6427 
6428  enum FLAGS
6429  {
6430  FLAG_USER_DATA_STRING = 0x01,
6431  };
6432 
6433 public:
6434  enum ALLOCATION_TYPE
6435  {
6436  ALLOCATION_TYPE_NONE,
6437  ALLOCATION_TYPE_BLOCK,
6438  ALLOCATION_TYPE_DEDICATED,
6439  };
6440 
6441  /*
6442  This struct is allocated using VmaPoolAllocator.
6443  */
6444 
6445  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
6446  m_Alignment{1},
6447  m_Size{0},
6448  m_pUserData{VMA_NULL},
6449  m_LastUseFrameIndex{currentFrameIndex},
6450  m_MemoryTypeIndex{0},
6451  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
6452  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
6453  m_MapCount{0},
6454  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
6455  {
6456 #if VMA_STATS_STRING_ENABLED
6457  m_CreationFrameIndex = currentFrameIndex;
6458  m_BufferImageUsage = 0;
6459 #endif
6460  }
6461 
6462  ~VmaAllocation_T()
6463  {
6464  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
6465 
6466  // Check if owned string was freed.
6467  VMA_ASSERT(m_pUserData == VMA_NULL);
6468  }
6469 
6470  void InitBlockAllocation(
6471  VmaDeviceMemoryBlock* block,
6472  VkDeviceSize offset,
6473  VkDeviceSize alignment,
6474  VkDeviceSize size,
6475  uint32_t memoryTypeIndex,
6476  VmaSuballocationType suballocationType,
6477  bool mapped,
6478  bool canBecomeLost)
6479  {
6480  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6481  VMA_ASSERT(block != VMA_NULL);
6482  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6483  m_Alignment = alignment;
6484  m_Size = size;
6485  m_MemoryTypeIndex = memoryTypeIndex;
6486  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6487  m_SuballocationType = (uint8_t)suballocationType;
6488  m_BlockAllocation.m_Block = block;
6489  m_BlockAllocation.m_Offset = offset;
6490  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
6491  }
6492 
6493  void InitLost()
6494  {
6495  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6496  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
6497  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6498  m_MemoryTypeIndex = 0;
6499  m_BlockAllocation.m_Block = VMA_NULL;
6500  m_BlockAllocation.m_Offset = 0;
6501  m_BlockAllocation.m_CanBecomeLost = true;
6502  }
6503 
6504  void ChangeBlockAllocation(
6505  VmaAllocator hAllocator,
6506  VmaDeviceMemoryBlock* block,
6507  VkDeviceSize offset);
6508 
6509  void ChangeOffset(VkDeviceSize newOffset);
6510 
6511  // A non-null pMappedData means the allocation was created with the MAPPED flag.
6512  void InitDedicatedAllocation(
6513  uint32_t memoryTypeIndex,
6514  VkDeviceMemory hMemory,
6515  VmaSuballocationType suballocationType,
6516  void* pMappedData,
6517  VkDeviceSize size)
6518  {
6519  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6520  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
6521  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
6522  m_Alignment = 0;
6523  m_Size = size;
6524  m_MemoryTypeIndex = memoryTypeIndex;
6525  m_SuballocationType = (uint8_t)suballocationType;
6526  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6527  m_DedicatedAllocation.m_hMemory = hMemory;
6528  m_DedicatedAllocation.m_pMappedData = pMappedData;
6529  m_DedicatedAllocation.m_Prev = VMA_NULL;
6530  m_DedicatedAllocation.m_Next = VMA_NULL;
6531  }
6532 
6533  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
6534  VkDeviceSize GetAlignment() const { return m_Alignment; }
6535  VkDeviceSize GetSize() const { return m_Size; }
6536  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
6537  void* GetUserData() const { return m_pUserData; }
6538  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6539  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6540 
6541  VmaDeviceMemoryBlock* GetBlock() const
6542  {
6543  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6544  return m_BlockAllocation.m_Block;
6545  }
6546  VkDeviceSize GetOffset() const;
6547  VkDeviceMemory GetMemory() const;
6548  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6549  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6550  void* GetMappedData() const;
6551  bool CanBecomeLost() const;
6552 
6553  uint32_t GetLastUseFrameIndex() const
6554  {
6555  return m_LastUseFrameIndex.load();
6556  }
6557  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6558  {
6559  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6560  }
6561  /*
6562  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6563  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6564  - Else, returns false.
6565 
6566  If hAllocation is already lost, this function asserts - it must not be called then.
6567  If hAllocation was not created with CAN_BECOME_LOST_BIT, it also asserts.
6568  */
6569  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6570 
6571  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6572  {
6573  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6574  outInfo.blockCount = 1;
6575  outInfo.allocationCount = 1;
6576  outInfo.unusedRangeCount = 0;
6577  outInfo.usedBytes = m_Size;
6578  outInfo.unusedBytes = 0;
6579  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6580  outInfo.unusedRangeSizeMin = UINT64_MAX;
6581  outInfo.unusedRangeSizeMax = 0;
6582  }
6583 
6584  void BlockAllocMap();
6585  void BlockAllocUnmap();
6586  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6587  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6588 
6589 #if VMA_STATS_STRING_ENABLED
6590  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6591  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6592 
6593  void InitBufferImageUsage(uint32_t bufferImageUsage)
6594  {
6595  VMA_ASSERT(m_BufferImageUsage == 0);
6596  m_BufferImageUsage = bufferImageUsage;
6597  }
6598 
6599  void PrintParameters(class VmaJsonWriter& json) const;
6600 #endif
6601 
6602 private:
6603  VkDeviceSize m_Alignment;
6604  VkDeviceSize m_Size;
6605  void* m_pUserData;
6606  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6607  uint32_t m_MemoryTypeIndex;
6608  uint8_t m_Type; // ALLOCATION_TYPE
6609  uint8_t m_SuballocationType; // VmaSuballocationType
6610  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6611  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
6612  uint8_t m_MapCount;
6613  uint8_t m_Flags; // enum FLAGS
6614 
6615  // Allocation out of VmaDeviceMemoryBlock.
6616  struct BlockAllocation
6617  {
6618  VmaDeviceMemoryBlock* m_Block;
6619  VkDeviceSize m_Offset;
6620  bool m_CanBecomeLost;
6621  };
6622 
6623  // Allocation for an object that has its own private VkDeviceMemory.
6624  struct DedicatedAllocation
6625  {
6626  VkDeviceMemory m_hMemory;
6627  void* m_pMappedData; // Not null means memory is mapped.
6628  VmaAllocation_T* m_Prev;
6629  VmaAllocation_T* m_Next;
6630  };
6631 
6632  union
6633  {
6634  // Allocation out of VmaDeviceMemoryBlock.
6635  BlockAllocation m_BlockAllocation;
6636  // Allocation for an object that has its own private VkDeviceMemory.
6637  DedicatedAllocation m_DedicatedAllocation;
6638  };
6639 
6640 #if VMA_STATS_STRING_ENABLED
6641  uint32_t m_CreationFrameIndex;
6642  uint32_t m_BufferImageUsage; // 0 if unknown.
6643 #endif
6644 
6645  void FreeUserDataString(VmaAllocator hAllocator);
6646 
6647  friend struct VmaDedicatedAllocationListItemTraits;
6648 };
6649 
6650 struct VmaDedicatedAllocationListItemTraits
6651 {
6652  typedef VmaAllocation_T ItemType;
6653  static ItemType* GetPrev(const ItemType* item)
6654  {
6655  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6656  return item->m_DedicatedAllocation.m_Prev;
6657  }
6658  static ItemType* GetNext(const ItemType* item)
6659  {
6660  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6661  return item->m_DedicatedAllocation.m_Next;
6662  }
6663  static ItemType*& AccessPrev(ItemType* item)
6664  {
6665  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6666  return item->m_DedicatedAllocation.m_Prev;
6667  }
6668  static ItemType*& AccessNext(ItemType* item){
6669  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6670  return item->m_DedicatedAllocation.m_Next;
6671  }
6672 };
6673 
6674 /*
6675 Represents a region of a VmaDeviceMemoryBlock that is either free or assigned
6676 and returned to the user as an allocated memory block.
6677 */
6678 struct VmaSuballocation
6679 {
6680  VkDeviceSize offset;
6681  VkDeviceSize size;
6682  VmaAllocation hAllocation;
6683  VmaSuballocationType type;
6684 };
6685 
6686 // Comparator for offsets.
6687 struct VmaSuballocationOffsetLess
6688 {
6689  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6690  {
6691  return lhs.offset < rhs.offset;
6692  }
6693 };
6694 struct VmaSuballocationOffsetGreater
6695 {
6696  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6697  {
6698  return lhs.offset > rhs.offset;
6699  }
6700 };
6701 
6702 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6703 
6704 // Cost of making one additional allocation lost, expressed in equivalent bytes.
6705 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6706 
6707 enum class VmaAllocationRequestType
6708 {
6709  Normal,
6710  // Used by "Linear" algorithm.
6711  UpperAddress,
6712  EndOf1st,
6713  EndOf2nd,
6714 };
6715 
6716 /*
6717 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6718 
6719 If canMakeOtherLost was false:
6720 - item points to a FREE suballocation.
6721 - itemsToMakeLostCount is 0.
6722 
6723 If canMakeOtherLost was true:
6724 - item points to first of sequence of suballocations, which are either FREE,
6725  or point to VmaAllocations that can become lost.
6726 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6727  the requested allocation to succeed.
6728 */
6729 struct VmaAllocationRequest
6730 {
6731  VkDeviceSize offset;
6732  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6733  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6734  VmaSuballocationList::iterator item;
6735  size_t itemsToMakeLostCount;
6736  void* customData;
6737  VmaAllocationRequestType type;
6738 
6739  VkDeviceSize CalcCost() const
6740  {
6741  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6742  }
6743 };
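// Worked example: a request that would make 2 allocations lost, whose sizes sum
// to 262144 bytes, costs 262144 + 2 * 1048576 = 2359296 "equivalent bytes".
// Candidate requests with fewer casualties therefore win even when they overlap
// more free space.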
6744 
6745 /*
6746 Data structure used for bookkeeping of allocations and unused ranges of memory
6747 in a single VkDeviceMemory block.
6748 */
6749 class VmaBlockMetadata
6750 {
6751 public:
6752  VmaBlockMetadata(VmaAllocator hAllocator);
6753  virtual ~VmaBlockMetadata() { }
6754  virtual void Init(VkDeviceSize size) { m_Size = size; }
6755 
6756  // Validates all data structures inside this object. If not valid, returns false.
6757  virtual bool Validate() const = 0;
6758  VkDeviceSize GetSize() const { return m_Size; }
6759  virtual size_t GetAllocationCount() const = 0;
6760  virtual VkDeviceSize GetSumFreeSize() const = 0;
6761  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6762  // Returns true if this block is empty - contains only a single free suballocation.
6763  virtual bool IsEmpty() const = 0;
6764 
6765  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6766  // Shouldn't modify blockCount.
6767  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6768 
6769 #if VMA_STATS_STRING_ENABLED
6770  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6771 #endif
6772 
6773  // Tries to find a place for suballocation with given parameters inside this block.
6774  // If succeeded, fills pAllocationRequest and returns true.
6775  // If failed, returns false.
6776  virtual bool CreateAllocationRequest(
6777  uint32_t currentFrameIndex,
6778  uint32_t frameInUseCount,
6779  VkDeviceSize bufferImageGranularity,
6780  VkDeviceSize allocSize,
6781  VkDeviceSize allocAlignment,
6782  bool upperAddress,
6783  VmaSuballocationType allocType,
6784  bool canMakeOtherLost,
6785  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6786  uint32_t strategy,
6787  VmaAllocationRequest* pAllocationRequest) = 0;
6788 
6789  virtual bool MakeRequestedAllocationsLost(
6790  uint32_t currentFrameIndex,
6791  uint32_t frameInUseCount,
6792  VmaAllocationRequest* pAllocationRequest) = 0;
6793 
6794  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6795 
6796  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6797 
6798  // Makes actual allocation based on request. Request must already be checked and valid.
6799  virtual void Alloc(
6800  const VmaAllocationRequest& request,
6801  VmaSuballocationType type,
6802  VkDeviceSize allocSize,
6803  VmaAllocation hAllocation) = 0;
6804 
6805  // Frees suballocation assigned to given memory region.
6806  virtual void Free(const VmaAllocation allocation) = 0;
6807  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6808 
6809 protected:
6810  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6811 
6812 #if VMA_STATS_STRING_ENABLED
6813  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6814  VkDeviceSize unusedBytes,
6815  size_t allocationCount,
6816  size_t unusedRangeCount) const;
6817  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6818  VkDeviceSize offset,
6819  VmaAllocation hAllocation) const;
6820  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6821  VkDeviceSize offset,
6822  VkDeviceSize size) const;
6823  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6824 #endif
6825 
6826 private:
6827  VkDeviceSize m_Size;
6828  const VkAllocationCallbacks* m_pAllocationCallbacks;
6829 };
6830 
6831 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6832  VMA_ASSERT(0 && "Validation failed: " #cond); \
6833  return false; \
6834  } } while(false)
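/*
Usage sketch: VMA_VALIDATE is meant for the Validate() implementations below -
on a violated condition it triggers VMA_ASSERT (where assertions are enabled)
and makes the enclosing function return false.

\code
bool ExampleValidate(VkDeviceSize size, VkDeviceSize sumFreeSize) // hypothetical
{
    VMA_VALIDATE(sumFreeSize <= size); // asserts and returns false if violated
    return true;
}
\endcode
*/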
6835 
6836 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6837 {
6838  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6839 public:
6840  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6841  virtual ~VmaBlockMetadata_Generic();
6842  virtual void Init(VkDeviceSize size);
6843 
6844  virtual bool Validate() const;
6845  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6846  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6847  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6848  virtual bool IsEmpty() const;
6849 
6850  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6851  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6852 
6853 #if VMA_STATS_STRING_ENABLED
6854  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6855 #endif
6856 
6857  virtual bool CreateAllocationRequest(
6858  uint32_t currentFrameIndex,
6859  uint32_t frameInUseCount,
6860  VkDeviceSize bufferImageGranularity,
6861  VkDeviceSize allocSize,
6862  VkDeviceSize allocAlignment,
6863  bool upperAddress,
6864  VmaSuballocationType allocType,
6865  bool canMakeOtherLost,
6866  uint32_t strategy,
6867  VmaAllocationRequest* pAllocationRequest);
6868 
6869  virtual bool MakeRequestedAllocationsLost(
6870  uint32_t currentFrameIndex,
6871  uint32_t frameInUseCount,
6872  VmaAllocationRequest* pAllocationRequest);
6873 
6874  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6875 
6876  virtual VkResult CheckCorruption(const void* pBlockData);
6877 
6878  virtual void Alloc(
6879  const VmaAllocationRequest& request,
6880  VmaSuballocationType type,
6881  VkDeviceSize allocSize,
6882  VmaAllocation hAllocation);
6883 
6884  virtual void Free(const VmaAllocation allocation);
6885  virtual void FreeAtOffset(VkDeviceSize offset);
6886 
6887  ////////////////////////////////////////////////////////////////////////////////
6888  // For defragmentation
6889 
6890  bool IsBufferImageGranularityConflictPossible(
6891  VkDeviceSize bufferImageGranularity,
6892  VmaSuballocationType& inOutPrevSuballocType) const;
6893 
6894 private:
6895  friend class VmaDefragmentationAlgorithm_Generic;
6896  friend class VmaDefragmentationAlgorithm_Fast;
6897 
6898  uint32_t m_FreeCount;
6899  VkDeviceSize m_SumFreeSize;
6900  VmaSuballocationList m_Suballocations;
6901  // Suballocations that are free and have size greater than certain threshold.
6902  // Sorted by size, ascending.
6903  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6904 
6905  bool ValidateFreeSuballocationList() const;
6906 
6907  // Checks if a requested suballocation with the given parameters can be placed at the given suballocItem.
6908  // If yes, fills pOffset and returns true; if not, returns false.
6909  bool CheckAllocation(
6910  uint32_t currentFrameIndex,
6911  uint32_t frameInUseCount,
6912  VkDeviceSize bufferImageGranularity,
6913  VkDeviceSize allocSize,
6914  VkDeviceSize allocAlignment,
6915  VmaSuballocationType allocType,
6916  VmaSuballocationList::const_iterator suballocItem,
6917  bool canMakeOtherLost,
6918  VkDeviceSize* pOffset,
6919  size_t* itemsToMakeLostCount,
6920  VkDeviceSize* pSumFreeSize,
6921  VkDeviceSize* pSumItemSize) const;
6922  // Given a free suballocation, merges it with the following one, which must also be free.
6923  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6924  // Releases the given suballocation, making it free.
6925  // Merges it with adjacent free suballocations if applicable.
6926  // Returns an iterator to the new free suballocation at this place.
6927  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6928  // Inserts the given free suballocation into the sorted list
6929  // m_FreeSuballocationsBySize, if it is large enough to qualify.
6930  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6931  // Removes the given free suballocation from the sorted list
6932  // m_FreeSuballocationsBySize, if it was large enough to be registered.
6933  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6934 };
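/*
Sketch of the best-fit lookup this sorted vector enables (illustrative, not the
exact implementation): because m_FreeSuballocationsBySize is ordered by size
ascending, the smallest free suballocation able to hold allocSize can be found
with a binary search instead of a linear scan:

    // freeBySize stands for m_FreeSuballocationsBySize.
    size_t lo = 0, hi = freeBySize.size();
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2;
        if(freeBySize[mid]->size < allocSize)
            lo = mid + 1; // Too small - search the upper half.
        else
            hi = mid; // Large enough - look for an even smaller fit.
    }
    // If lo < freeBySize.size(), freeBySize[lo] is the best-fit candidate.
*/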
6935 
6936 /*
6937 Allocations and their references in the internal data structure look like this:
6938 
6939 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6940 
6941  0 +-------+
6942  | |
6943  | |
6944  | |
6945  +-------+
6946  | Alloc | 1st[m_1stNullItemsBeginCount]
6947  +-------+
6948  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6949  +-------+
6950  | ... |
6951  +-------+
6952  | Alloc | 1st[1st.size() - 1]
6953  +-------+
6954  | |
6955  | |
6956  | |
6957 GetSize() +-------+
6958 
6959 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6960 
6961  0 +-------+
6962  | Alloc | 2nd[0]
6963  +-------+
6964  | Alloc | 2nd[1]
6965  +-------+
6966  | ... |
6967  +-------+
6968  | Alloc | 2nd[2nd.size() - 1]
6969  +-------+
6970  | |
6971  | |
6972  | |
6973  +-------+
6974  | Alloc | 1st[m_1stNullItemsBeginCount]
6975  +-------+
6976  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6977  +-------+
6978  | ... |
6979  +-------+
6980  | Alloc | 1st[1st.size() - 1]
6981  +-------+
6982  | |
6983 GetSize() +-------+
6984 
6985 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6986 
6987  0 +-------+
6988  | |
6989  | |
6990  | |
6991  +-------+
6992  | Alloc | 1st[m_1stNullItemsBeginCount]
6993  +-------+
6994  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6995  +-------+
6996  | ... |
6997  +-------+
6998  | Alloc | 1st[1st.size() - 1]
6999  +-------+
7000  | |
7001  | |
7002  | |
7003  +-------+
7004  | Alloc | 2nd[2nd.size() - 1]
7005  +-------+
7006  | ... |
7007  +-------+
7008  | Alloc | 2nd[1]
7009  +-------+
7010  | Alloc | 2nd[0]
7011 GetSize() +-------+
7012 
7013 */
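/*
From the public API, this linear metadata is selected by creating a custom pool
with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT. A minimal sketch - `allocator` and
`memTypeIndex` are assumed to already exist (e.g. from vmaFindMemoryTypeIndex):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // One fixed-size block.
    poolCreateInfo.maxBlockCount = 1;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

Freeing allocations strictly in LIFO or FIFO order then yields the stack- or
ring-buffer-like behavior shown in the diagrams above.
*/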
7014 class VmaBlockMetadata_Linear : public VmaBlockMetadata
7015 {
7016  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
7017 public:
7018  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
7019  virtual ~VmaBlockMetadata_Linear();
7020  virtual void Init(VkDeviceSize size);
7021 
7022  virtual bool Validate() const;
7023  virtual size_t GetAllocationCount() const;
7024  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
7025  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
7026  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
7027 
7028  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
7029  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
7030 
7031 #if VMA_STATS_STRING_ENABLED
7032  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
7033 #endif
7034 
7035  virtual bool CreateAllocationRequest(
7036  uint32_t currentFrameIndex,
7037  uint32_t frameInUseCount,
7038  VkDeviceSize bufferImageGranularity,
7039  VkDeviceSize allocSize,
7040  VkDeviceSize allocAlignment,
7041  bool upperAddress,
7042  VmaSuballocationType allocType,
7043  bool canMakeOtherLost,
7044  uint32_t strategy,
7045  VmaAllocationRequest* pAllocationRequest);
7046 
7047  virtual bool MakeRequestedAllocationsLost(
7048  uint32_t currentFrameIndex,
7049  uint32_t frameInUseCount,
7050  VmaAllocationRequest* pAllocationRequest);
7051 
7052  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
7053 
7054  virtual VkResult CheckCorruption(const void* pBlockData);
7055 
7056  virtual void Alloc(
7057  const VmaAllocationRequest& request,
7058  VmaSuballocationType type,
7059  VkDeviceSize allocSize,
7060  VmaAllocation hAllocation);
7061 
7062  virtual void Free(const VmaAllocation allocation);
7063  virtual void FreeAtOffset(VkDeviceSize offset);
7064 
7065 private:
7066  /*
7067  There are two suballocation vectors, used in a ping-pong fashion.
7068  The one with index m_1stVectorIndex is called 1st.
7069  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
7070  2nd can be non-empty only when 1st is not empty.
7071  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
7072  */
7073  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
7074 
7075  enum SECOND_VECTOR_MODE
7076  {
7077  SECOND_VECTOR_EMPTY,
7078  /*
7079  Suballocations in the 2nd vector are created later than the ones in the 1st,
7080  but they all have smaller offsets.
7081  */
7082  SECOND_VECTOR_RING_BUFFER,
7083  /*
7084  Suballocations in the 2nd vector form the upper side of a double stack.
7085  They all have offsets higher than those in the 1st vector.
7086  The top of this stack means smaller offsets, but higher indices in this vector.
7087  */
7088  SECOND_VECTOR_DOUBLE_STACK,
7089  };
7090 
7091  VkDeviceSize m_SumFreeSize;
7092  SuballocationVectorType m_Suballocations0, m_Suballocations1;
7093  uint32_t m_1stVectorIndex;
7094  SECOND_VECTOR_MODE m_2ndVectorMode;
7095 
7096  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7097  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7098  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7099  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7100 
7101  // Number of items in 1st vector with hAllocation = null at the beginning.
7102  size_t m_1stNullItemsBeginCount;
7103  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
7104  size_t m_1stNullItemsMiddleCount;
7105  // Number of items in 2nd vector with hAllocation = null.
7106  size_t m_2ndNullItemsCount;
7107 
7108  bool ShouldCompact1st() const;
7109  void CleanupAfterFree();
7110 
7111  bool CreateAllocationRequest_LowerAddress(
7112  uint32_t currentFrameIndex,
7113  uint32_t frameInUseCount,
7114  VkDeviceSize bufferImageGranularity,
7115  VkDeviceSize allocSize,
7116  VkDeviceSize allocAlignment,
7117  VmaSuballocationType allocType,
7118  bool canMakeOtherLost,
7119  uint32_t strategy,
7120  VmaAllocationRequest* pAllocationRequest);
7121  bool CreateAllocationRequest_UpperAddress(
7122  uint32_t currentFrameIndex,
7123  uint32_t frameInUseCount,
7124  VkDeviceSize bufferImageGranularity,
7125  VkDeviceSize allocSize,
7126  VkDeviceSize allocAlignment,
7127  VmaSuballocationType allocType,
7128  bool canMakeOtherLost,
7129  uint32_t strategy,
7130  VmaAllocationRequest* pAllocationRequest);
7131 };
7132 
7133 /*
7134 - GetSize() is the original size of allocated memory block.
7135 - m_UsableSize is this size aligned down to a power of two.
7136  All allocations and calculations happen relative to m_UsableSize.
7137 - GetUnusableSize() is the difference between them.
7138  It is reported as a separate, unused range, not available for allocations.
7139 
7140 Node at level 0 has size = m_UsableSize.
7141 Each subsequent level contains nodes half the size of the previous level.
7142 m_LevelCount is the maximum number of levels to use in the current object.
7143 */
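/*
A sketch of how Init() can derive these values from the description above
(illustrative; assumes a VmaPrevPow2 helper that aligns down to a power of two,
defined earlier in this file):

    m_UsableSize = VmaPrevPow2(size);
    m_SumFreeSize = m_UsableSize;

    m_LevelCount = 1;
    while(m_LevelCount < MAX_LEVELS &&
        LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
    {
        ++m_LevelCount;
    }
*/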
7144 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
7145 {
7146  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
7147 public:
7148  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
7149  virtual ~VmaBlockMetadata_Buddy();
7150  virtual void Init(VkDeviceSize size);
7151 
7152  virtual bool Validate() const;
7153  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
7154  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
7155  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
7156  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
7157 
7158  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
7159  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
7160 
7161 #if VMA_STATS_STRING_ENABLED
7162  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
7163 #endif
7164 
7165  virtual bool CreateAllocationRequest(
7166  uint32_t currentFrameIndex,
7167  uint32_t frameInUseCount,
7168  VkDeviceSize bufferImageGranularity,
7169  VkDeviceSize allocSize,
7170  VkDeviceSize allocAlignment,
7171  bool upperAddress,
7172  VmaSuballocationType allocType,
7173  bool canMakeOtherLost,
7174  uint32_t strategy,
7175  VmaAllocationRequest* pAllocationRequest);
7176 
7177  virtual bool MakeRequestedAllocationsLost(
7178  uint32_t currentFrameIndex,
7179  uint32_t frameInUseCount,
7180  VmaAllocationRequest* pAllocationRequest);
7181 
7182  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
7183 
7184  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
7185 
7186  virtual void Alloc(
7187  const VmaAllocationRequest& request,
7188  VmaSuballocationType type,
7189  VkDeviceSize allocSize,
7190  VmaAllocation hAllocation);
7191 
7192  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
7193  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
7194 
7195 private:
7196  static const VkDeviceSize MIN_NODE_SIZE = 32;
7197  static const size_t MAX_LEVELS = 30;
7198 
7199  struct ValidationContext
7200  {
7201  size_t calculatedAllocationCount;
7202  size_t calculatedFreeCount;
7203  VkDeviceSize calculatedSumFreeSize;
7204 
7205  ValidationContext() :
7206  calculatedAllocationCount(0),
7207  calculatedFreeCount(0),
7208  calculatedSumFreeSize(0) { }
7209  };
7210 
7211  struct Node
7212  {
7213  VkDeviceSize offset;
7214  enum TYPE
7215  {
7216  TYPE_FREE,
7217  TYPE_ALLOCATION,
7218  TYPE_SPLIT,
7219  TYPE_COUNT
7220  } type;
7221  Node* parent;
7222  Node* buddy;
7223 
7224  union
7225  {
7226  struct
7227  {
7228  Node* prev;
7229  Node* next;
7230  } free;
7231  struct
7232  {
7233  VmaAllocation alloc;
7234  } allocation;
7235  struct
7236  {
7237  Node* leftChild;
7238  } split;
7239  };
7240  };
7241 
7242  // Size of the memory block aligned down to a power of two.
7243  VkDeviceSize m_UsableSize;
7244  uint32_t m_LevelCount;
7245 
7246  Node* m_Root;
7247  struct {
7248  Node* front;
7249  Node* back;
7250  } m_FreeList[MAX_LEVELS];
7251  // Number of nodes in the tree with type == TYPE_ALLOCATION.
7252  size_t m_AllocationCount;
7253  // Number of nodes in the tree with type == TYPE_FREE.
7254  size_t m_FreeCount;
7255  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
7256  VkDeviceSize m_SumFreeSize;
7257 
7258  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
7259  void DeleteNode(Node* node);
7260  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
7261  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
7262  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
7263  // Alloc passed just for validation. Can be null.
7264  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
7265  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
7266  // Adds node to the front of FreeList at given level.
7267  // node->type must be FREE.
7268  // node->free.prev, next can be undefined.
7269  void AddToFreeListFront(uint32_t level, Node* node);
7270  // Removes node from FreeList at given level.
7271  // node->type must be FREE.
7272  // node->free.prev, next stay untouched.
7273  void RemoveFromFreeList(uint32_t level, Node* node);
7274 
7275 #if VMA_STATS_STRING_ENABLED
7276  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
7277 #endif
7278 };
7279 
7280 /*
7281 Represents a single block of device memory (`VkDeviceMemory`) with all the
7282 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
7283 
7284 Thread-safety: This class must be externally synchronized.
7285 */
7286 class VmaDeviceMemoryBlock
7287 {
7288  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
7289 public:
7290  VmaBlockMetadata* m_pMetadata;
7291 
7292  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
7293 
7294  ~VmaDeviceMemoryBlock()
7295  {
7296  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
7297  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
7298  }
7299 
7300  // Always call after construction.
7301  void Init(
7302  VmaAllocator hAllocator,
7303  VmaPool hParentPool,
7304  uint32_t newMemoryTypeIndex,
7305  VkDeviceMemory newMemory,
7306  VkDeviceSize newSize,
7307  uint32_t id,
7308  uint32_t algorithm);
7309  // Always call before destruction.
7310  void Destroy(VmaAllocator allocator);
7311 
7312  VmaPool GetParentPool() const { return m_hParentPool; }
7313  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
7314  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7315  uint32_t GetId() const { return m_Id; }
7316  void* GetMappedData() const { return m_pMappedData; }
7317 
7318  // Validates all data structures inside this object. If not valid, returns false.
7319  bool Validate() const;
7320 
7321  VkResult CheckCorruption(VmaAllocator hAllocator);
7322 
7323  // ppData can be null.
7324  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
7325  void Unmap(VmaAllocator hAllocator, uint32_t count);
7326 
7327  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
7328  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
7329 
7330  VkResult BindBufferMemory(
7331  const VmaAllocator hAllocator,
7332  const VmaAllocation hAllocation,
7333  VkDeviceSize allocationLocalOffset,
7334  VkBuffer hBuffer,
7335  const void* pNext);
7336  VkResult BindImageMemory(
7337  const VmaAllocator hAllocator,
7338  const VmaAllocation hAllocation,
7339  VkDeviceSize allocationLocalOffset,
7340  VkImage hImage,
7341  const void* pNext);
7342 
7343 private:
7344  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
7345  uint32_t m_MemoryTypeIndex;
7346  uint32_t m_Id;
7347  VkDeviceMemory m_hMemory;
7348 
7349  /*
7350  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
7351  Also protects m_MapCount, m_pMappedData.
7352  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
7353  */
7354  VMA_MUTEX m_Mutex;
7355  uint32_t m_MapCount;
7356  void* m_pMappedData;
7357 };
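/*
Map()/Unmap() implement reference-counted mapping: vkMapMemory is called only
when m_MapCount goes from 0 to 1, and vkUnmapMemory only when it returns to 0,
so multiple allocations in the same block can be mapped concurrently. Through
the public API this corresponds to (sketch, assumes valid handles and data):

    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, srcSize);
        vmaUnmapMemory(allocator, allocation);
    }
*/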
7358 
7359 struct VmaDefragmentationMove
7360 {
7361  size_t srcBlockIndex;
7362  size_t dstBlockIndex;
7363  VkDeviceSize srcOffset;
7364  VkDeviceSize dstOffset;
7365  VkDeviceSize size;
7366  VmaAllocation hAllocation;
7367  VmaDeviceMemoryBlock* pSrcBlock;
7368  VmaDeviceMemoryBlock* pDstBlock;
7369 };
7370 
7371 class VmaDefragmentationAlgorithm;
7372 
7373 /*
7374 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
7375 Vulkan memory type.
7376 
7377 Synchronized internally with a mutex.
7378 */
7379 struct VmaBlockVector
7380 {
7381  VMA_CLASS_NO_COPY(VmaBlockVector)
7382 public:
7383  VmaBlockVector(
7384  VmaAllocator hAllocator,
7385  VmaPool hParentPool,
7386  uint32_t memoryTypeIndex,
7387  VkDeviceSize preferredBlockSize,
7388  size_t minBlockCount,
7389  size_t maxBlockCount,
7390  VkDeviceSize bufferImageGranularity,
7391  uint32_t frameInUseCount,
7392  bool explicitBlockSize,
7393  uint32_t algorithm,
7394  float priority,
7395  VkDeviceSize minAllocationAlignment,
7396  void* pMemoryAllocateNext);
7397  ~VmaBlockVector();
7398 
7399  VkResult CreateMinBlocks();
7400 
7401  VmaAllocator GetAllocator() const { return m_hAllocator; }
7402  VmaPool GetParentPool() const { return m_hParentPool; }
7403  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
7404  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7405  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
7406  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
7407  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
7408  uint32_t GetAlgorithm() const { return m_Algorithm; }
7409 
7410  void GetPoolStats(VmaPoolStats* pStats);
7411 
7412  bool IsEmpty();
7413  bool IsCorruptionDetectionEnabled() const;
7414 
7415  VkResult Allocate(
7416  uint32_t currentFrameIndex,
7417  VkDeviceSize size,
7418  VkDeviceSize alignment,
7419  const VmaAllocationCreateInfo& createInfo,
7420  VmaSuballocationType suballocType,
7421  size_t allocationCount,
7422  VmaAllocation* pAllocations);
7423 
7424  void Free(const VmaAllocation hAllocation);
7425 
7426  // Adds statistics of this BlockVector to pStats.
7427  void AddStats(VmaStats* pStats);
7428 
7429 #if VMA_STATS_STRING_ENABLED
7430  void PrintDetailedMap(class VmaJsonWriter& json);
7431 #endif
7432 
7433  void MakePoolAllocationsLost(
7434  uint32_t currentFrameIndex,
7435  size_t* pLostAllocationCount);
7436  VkResult CheckCorruption();
7437 
7438  // Saves results in pCtx->res.
7439  void Defragment(
7440  class VmaBlockVectorDefragmentationContext* pCtx,
7441  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
7442  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
7443  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
7444  VkCommandBuffer commandBuffer);
7445  void DefragmentationEnd(
7446  class VmaBlockVectorDefragmentationContext* pCtx,
7447  uint32_t flags,
7448  VmaDefragmentationStats* pStats);
7449 
7450  uint32_t ProcessDefragmentations(
7451  class VmaBlockVectorDefragmentationContext *pCtx,
7452  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
7453 
7454  void CommitDefragmentations(
7455  class VmaBlockVectorDefragmentationContext *pCtx,
7456  VmaDefragmentationStats* pStats);
7457 
7458  ////////////////////////////////////////////////////////////////////////////////
7459  // To be used only while the m_Mutex is locked. Used during defragmentation.
7460 
7461  size_t GetBlockCount() const { return m_Blocks.size(); }
7462  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
7463  size_t CalcAllocationCount() const;
7464  bool IsBufferImageGranularityConflictPossible() const;
7465 
7466 private:
7467  friend class VmaDefragmentationAlgorithm_Generic;
7468 
7469  const VmaAllocator m_hAllocator;
7470  const VmaPool m_hParentPool;
7471  const uint32_t m_MemoryTypeIndex;
7472  const VkDeviceSize m_PreferredBlockSize;
7473  const size_t m_MinBlockCount;
7474  const size_t m_MaxBlockCount;
7475  const VkDeviceSize m_BufferImageGranularity;
7476  const uint32_t m_FrameInUseCount;
7477  const bool m_ExplicitBlockSize;
7478  const uint32_t m_Algorithm;
7479  const float m_Priority;
7480  const VkDeviceSize m_MinAllocationAlignment;
7481  void* const m_pMemoryAllocateNext;
7482  VMA_RW_MUTEX m_Mutex;
7483 
7484  /* There can be at most one memory block that is completely empty (except when minBlockCount > 0) -
7485  a hysteresis that avoids the pessimistic case of alternately creating and destroying a VkDeviceMemory. */
7486  bool m_HasEmptyBlock;
7487  // Incrementally sorted by sumFreeSize, ascending.
7488  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
7489  uint32_t m_NextBlockId;
7490 
7491  VkDeviceSize CalcMaxBlockSize() const;
7492 
7493  // Finds and removes given block from vector.
7494  void Remove(VmaDeviceMemoryBlock* pBlock);
7495 
7496  // Performs a single step in sorting m_Blocks. They may not be fully sorted
7497  // after this call.
7498  void IncrementallySortBlocks();
7499 
7500  VkResult AllocatePage(
7501  uint32_t currentFrameIndex,
7502  VkDeviceSize size,
7503  VkDeviceSize alignment,
7504  const VmaAllocationCreateInfo& createInfo,
7505  VmaSuballocationType suballocType,
7506  VmaAllocation* pAllocation);
7507 
7508  // To be used only without CAN_MAKE_OTHER_LOST flag.
7509  VkResult AllocateFromBlock(
7510  VmaDeviceMemoryBlock* pBlock,
7511  uint32_t currentFrameIndex,
7512  VkDeviceSize size,
7513  VkDeviceSize alignment,
7514  VmaAllocationCreateFlags allocFlags,
7515  void* pUserData,
7516  VmaSuballocationType suballocType,
7517  uint32_t strategy,
7518  VmaAllocation* pAllocation);
7519 
7520  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
7521 
7522  // Saves result to pCtx->res.
7523  void ApplyDefragmentationMovesCpu(
7524  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7525  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
7526  // Saves result to pCtx->res.
7527  void ApplyDefragmentationMovesGpu(
7528  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7529  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7530  VkCommandBuffer commandBuffer);
7531 
7532  /*
7533  Used during defragmentation. pDefragmentationStats is optional; if not null,
7534  it is treated as in/out and updated with new data.
7535  */
7536  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7537 
7538  void UpdateHasEmptyBlock();
7539 };
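/*
The statistics gathered by GetPoolStats() surface through the public API
(sketch, assumes a valid custom `pool`):

    VmaPoolStats poolStats = {};
    vmaGetPoolStats(allocator, pool, &poolStats);
    // poolStats.size, poolStats.unusedSize, poolStats.allocationCount, ...
*/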
7540 
7541 struct VmaPool_T
7542 {
7543  VMA_CLASS_NO_COPY(VmaPool_T)
7544 public:
7545  VmaBlockVector m_BlockVector;
7546 
7547  VmaPool_T(
7548  VmaAllocator hAllocator,
7549  const VmaPoolCreateInfo& createInfo,
7550  VkDeviceSize preferredBlockSize);
7551  ~VmaPool_T();
7552 
7553  uint32_t GetId() const { return m_Id; }
7554  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
7555 
7556  const char* GetName() const { return m_Name; }
7557  void SetName(const char* pName);
7558 
7559 #if VMA_STATS_STRING_ENABLED
7560  //void PrintDetailedMap(class VmaStringBuilder& sb);
7561 #endif
7562 
7563 private:
7564  uint32_t m_Id;
7565  char* m_Name;
7566  VmaPool_T* m_PrevPool = VMA_NULL;
7567  VmaPool_T* m_NextPool = VMA_NULL;
7568  friend struct VmaPoolListItemTraits;
7569 };
7570 
7571 struct VmaPoolListItemTraits
7572 {
7573  typedef VmaPool_T ItemType;
7574  static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
7575  static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
7576  static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
7577  static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
7578 };
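/*
These traits let the intrusive linked list chain pools through their own
m_PrevPool/m_NextPool members, with no separate list nodes to allocate.
Iteration over all pools then looks like this (sketch, assuming the list type
exposes Front()):

    for(VmaPool_T* pool = m_Pools.Front(); pool != VMA_NULL;
        pool = VmaPoolListItemTraits::GetNext(pool))
    {
        // Visit pool...
    }
*/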
7579 
7580 /*
7581 Performs defragmentation:
7582 
7583 - Updates `pBlockVector->m_pMetadata`.
7584 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7585 - Does not move actual data, only returns requested moves as `moves`.
7586 */
7587 class VmaDefragmentationAlgorithm
7588 {
7589  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7590 public:
7591  VmaDefragmentationAlgorithm(
7592  VmaAllocator hAllocator,
7593  VmaBlockVector* pBlockVector,
7594  uint32_t currentFrameIndex) :
7595  m_hAllocator(hAllocator),
7596  m_pBlockVector(pBlockVector),
7597  m_CurrentFrameIndex(currentFrameIndex)
7598  {
7599  }
7600  virtual ~VmaDefragmentationAlgorithm()
7601  {
7602  }
7603 
7604  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7605  virtual void AddAll() = 0;
7606 
7607  virtual VkResult Defragment(
7608  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7609  VkDeviceSize maxBytesToMove,
7610  uint32_t maxAllocationsToMove,
7611  VmaDefragmentationFlags flags) = 0;
7612 
7613  virtual VkDeviceSize GetBytesMoved() const = 0;
7614  virtual uint32_t GetAllocationsMoved() const = 0;
7615 
7616 protected:
7617  VmaAllocator const m_hAllocator;
7618  VmaBlockVector* const m_pBlockVector;
7619  const uint32_t m_CurrentFrameIndex;
7620 
7621  struct AllocationInfo
7622  {
7623  VmaAllocation m_hAllocation;
7624  VkBool32* m_pChanged;
7625 
7626  AllocationInfo() :
7627  m_hAllocation(VK_NULL_HANDLE),
7628  m_pChanged(VMA_NULL)
7629  {
7630  }
7631  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7632  m_hAllocation(hAlloc),
7633  m_pChanged(pChanged)
7634  {
7635  }
7636  };
7637 };
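/*
These algorithm objects are driven indirectly through the public functions
vmaDefragmentationBegin()/vmaDefragmentationEnd(). A CPU-side sketch, assuming
`allocations` holds allocCount VmaAllocation handles:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // On VK_SUCCESS or VK_NOT_READY, finish with:
    vmaDefragmentationEnd(allocator, defragCtx);
*/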
7638 
7639 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7640 {
7641  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7642 public:
7643  VmaDefragmentationAlgorithm_Generic(
7644  VmaAllocator hAllocator,
7645  VmaBlockVector* pBlockVector,
7646  uint32_t currentFrameIndex,
7647  bool overlappingMoveSupported);
7648  virtual ~VmaDefragmentationAlgorithm_Generic();
7649 
7650  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7651  virtual void AddAll() { m_AllAllocations = true; }
7652 
7653  virtual VkResult Defragment(
7654  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7655  VkDeviceSize maxBytesToMove,
7656  uint32_t maxAllocationsToMove,
7657  VmaDefragmentationFlags flags);
7658 
7659  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7660  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7661 
7662 private:
7663  uint32_t m_AllocationCount;
7664  bool m_AllAllocations;
7665 
7666  VkDeviceSize m_BytesMoved;
7667  uint32_t m_AllocationsMoved;
7668 
7669  struct AllocationInfoSizeGreater
7670  {
7671  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7672  {
7673  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7674  }
7675  };
7676 
7677  struct AllocationInfoOffsetGreater
7678  {
7679  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7680  {
7681  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7682  }
7683  };
7684 
7685  struct BlockInfo
7686  {
7687  size_t m_OriginalBlockIndex;
7688  VmaDeviceMemoryBlock* m_pBlock;
7689  bool m_HasNonMovableAllocations;
7690  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7691 
7692  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7693  m_OriginalBlockIndex(SIZE_MAX),
7694  m_pBlock(VMA_NULL),
7695  m_HasNonMovableAllocations(true),
7696  m_Allocations(pAllocationCallbacks)
7697  {
7698  }
7699 
7700  void CalcHasNonMovableAllocations()
7701  {
7702  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7703  const size_t defragmentAllocCount = m_Allocations.size();
7704  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7705  }
7706 
7707  void SortAllocationsBySizeDescending()
7708  {
7709  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7710  }
7711 
7712  void SortAllocationsByOffsetDescending()
7713  {
7714  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7715  }
7716  };
7717 
7718  struct BlockPointerLess
7719  {
7720  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7721  {
7722  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7723  }
7724  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7725  {
7726  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7727  }
7728  };
7729 
7730  // 1. Blocks with some non-movable allocations go first.
7731  // 2. Blocks with smaller sumFreeSize go first.
7732  struct BlockInfoCompareMoveDestination
7733  {
7734  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7735  {
7736  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7737  {
7738  return true;
7739  }
7740  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7741  {
7742  return false;
7743  }
7744  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7745  {
7746  return true;
7747  }
7748  return false;
7749  }
7750  };
7751 
7752  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7753  BlockInfoVector m_Blocks;
7754 
7755  VkResult DefragmentRound(
7756  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7757  VkDeviceSize maxBytesToMove,
7758  uint32_t maxAllocationsToMove,
7759  bool freeOldAllocations);
7760 
7761  size_t CalcBlocksWithNonMovableCount() const;
7762 
7763  static bool MoveMakesSense(
7764  size_t dstBlockIndex, VkDeviceSize dstOffset,
7765  size_t srcBlockIndex, VkDeviceSize srcOffset);
7766 };
7767 
7768 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7769 {
7770  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7771 public:
7772  VmaDefragmentationAlgorithm_Fast(
7773  VmaAllocator hAllocator,
7774  VmaBlockVector* pBlockVector,
7775  uint32_t currentFrameIndex,
7776  bool overlappingMoveSupported);
7777  virtual ~VmaDefragmentationAlgorithm_Fast();
7778 
7779  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7780  virtual void AddAll() { m_AllAllocations = true; }
7781 
7782  virtual VkResult Defragment(
7783  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7784  VkDeviceSize maxBytesToMove,
7785  uint32_t maxAllocationsToMove,
7786  VmaDefragmentationFlags flags);
7787 
7788  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7789  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7790 
7791 private:
7792  struct BlockInfo
7793  {
7794  size_t origBlockIndex;
7795  };
7796 
7797  class FreeSpaceDatabase
7798  {
7799  public:
7800  FreeSpaceDatabase()
7801  {
7802  FreeSpace s = {};
7803  s.blockInfoIndex = SIZE_MAX;
7804  for(size_t i = 0; i < MAX_COUNT; ++i)
7805  {
7806  m_FreeSpaces[i] = s;
7807  }
7808  }
7809 
7810  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7811  {
7812  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7813  {
7814  return;
7815  }
7816 
7817  // Find the first invalid structure or, failing that, the smallest one.
7818  size_t bestIndex = SIZE_MAX;
7819  for(size_t i = 0; i < MAX_COUNT; ++i)
7820  {
7821  // Empty structure.
7822  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7823  {
7824  bestIndex = i;
7825  break;
7826  }
7827  if(m_FreeSpaces[i].size < size &&
7828  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7829  {
7830  bestIndex = i;
7831  }
7832  }
7833 
7834  if(bestIndex != SIZE_MAX)
7835  {
7836  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7837  m_FreeSpaces[bestIndex].offset = offset;
7838  m_FreeSpaces[bestIndex].size = size;
7839  }
7840  }
7841 
7842  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7843  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7844  {
7845  size_t bestIndex = SIZE_MAX;
7846  VkDeviceSize bestFreeSpaceAfter = 0;
7847  for(size_t i = 0; i < MAX_COUNT; ++i)
7848  {
7849  // Structure is valid.
7850  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7851  {
7852  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7853  // Allocation fits into this structure.
7854  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7855  {
7856  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7857  (dstOffset + size);
7858  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7859  {
7860  bestIndex = i;
7861  bestFreeSpaceAfter = freeSpaceAfter;
7862  }
7863  }
7864  }
7865  }
7866 
7867  if(bestIndex != SIZE_MAX)
7868  {
7869  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7870  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7871 
7872  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7873  {
7874  // Leave this structure for remaining empty space.
7875  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7876  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7877  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7878  }
7879  else
7880  {
7881  // This structure becomes invalid.
7882  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7883  }
7884 
7885  return true;
7886  }
7887 
7888  return false;
7889  }
7890 
7891  private:
7892  static const size_t MAX_COUNT = 4;
7893 
7894  struct FreeSpace
7895  {
7896  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7897  VkDeviceSize offset;
7898  VkDeviceSize size;
7899  } m_FreeSpaces[MAX_COUNT];
7900  };
7901 
7902  const bool m_OverlappingMoveSupported;
7903 
7904  uint32_t m_AllocationCount;
7905  bool m_AllAllocations;
7906 
7907  VkDeviceSize m_BytesMoved;
7908  uint32_t m_AllocationsMoved;
7909 
7910  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7911 
7912  void PreprocessMetadata();
7913  void PostprocessMetadata();
7914  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7915 };
7916 
7917 struct VmaBlockDefragmentationContext
7918 {
7919  enum BLOCK_FLAG
7920  {
7921  BLOCK_FLAG_USED = 0x00000001,
7922  };
7923  uint32_t flags;
7924  VkBuffer hBuffer;
7925 };
7926 
7927 class VmaBlockVectorDefragmentationContext
7928 {
7929  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7930 public:
7931  VkResult res;
7932  bool mutexLocked;
7933  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7934  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7935  uint32_t defragmentationMovesProcessed;
7936  uint32_t defragmentationMovesCommitted;
7937  bool hasDefragmentationPlan;
7938 
7939  VmaBlockVectorDefragmentationContext(
7940  VmaAllocator hAllocator,
7941  VmaPool hCustomPool, // Optional.
7942  VmaBlockVector* pBlockVector,
7943  uint32_t currFrameIndex);
7944  ~VmaBlockVectorDefragmentationContext();
7945 
7946  VmaPool GetCustomPool() const { return m_hCustomPool; }
7947  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7948  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7949 
7950  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7951  void AddAll() { m_AllAllocations = true; }
7952 
7953  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7954 
7955 private:
7956  const VmaAllocator m_hAllocator;
7957  // Null if not from custom pool.
7958  const VmaPool m_hCustomPool;
7959  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7960  VmaBlockVector* const m_pBlockVector;
7961  const uint32_t m_CurrFrameIndex;
7962  // Owner of this object.
7963  VmaDefragmentationAlgorithm* m_pAlgorithm;
7964 
7965  struct AllocInfo
7966  {
7967  VmaAllocation hAlloc;
7968  VkBool32* pChanged;
7969  };
7970  // Used between constructor and Begin.
7971  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7972  bool m_AllAllocations;
7973 };
7974 
7975 struct VmaDefragmentationContext_T
7976 {
7977 private:
7978  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7979 public:
7980  VmaDefragmentationContext_T(
7981  VmaAllocator hAllocator,
7982  uint32_t currFrameIndex,
7983  uint32_t flags,
7984  VmaDefragmentationStats* pStats);
7985  ~VmaDefragmentationContext_T();
7986 
7987  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7988  void AddAllocations(
7989  uint32_t allocationCount,
7990  const VmaAllocation* pAllocations,
7991  VkBool32* pAllocationsChanged);
7992 
7993  /*
7994  Returns:
7995  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
7996  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
7997  - Negative value if error occurred and object can be destroyed immediately.
7998  */
7999  VkResult Defragment(
8000  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
8001  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
8002  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
8003 
8004  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
8005  VkResult DefragmentPassEnd();
8006 
8007 private:
8008  const VmaAllocator m_hAllocator;
8009  const uint32_t m_CurrFrameIndex;
8010  const uint32_t m_Flags;
8011  VmaDefragmentationStats* const m_pStats;
8012 
8013  VkDeviceSize m_MaxCpuBytesToMove;
8014  uint32_t m_MaxCpuAllocationsToMove;
8015  VkDeviceSize m_MaxGpuBytesToMove;
8016  uint32_t m_MaxGpuAllocationsToMove;
8017 
8018  // Owner of these objects.
8019  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
8020  // Owner of these objects.
8021  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
8022 };
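/*
DefragmentPassBegin()/DefragmentPassEnd() back the incremental public API. A
sketch, assuming `defragCtx` was obtained from vmaDefragmentationBegin() with
the incremental flag set:

    VmaDefragmentationPassInfo passInfo = {};
    VkResult res = vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
    // Execute passInfo.pMoves[0..moveCount) with your own transfer commands,
    // wait for them to complete, then close the pass:
    res = vmaEndDefragmentationPass(allocator, defragCtx);
*/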
8023 
8024 #if VMA_RECORDING_ENABLED
8025 
8026 class VmaRecorder
8027 {
8028 public:
8029  VmaRecorder();
8030  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
8031  void WriteConfiguration(
8032  const VkPhysicalDeviceProperties& devProps,
8033  const VkPhysicalDeviceMemoryProperties& memProps,
8034  uint32_t vulkanApiVersion,
8035  bool dedicatedAllocationExtensionEnabled,
8036  bool bindMemory2ExtensionEnabled,
8037  bool memoryBudgetExtensionEnabled,
8038  bool deviceCoherentMemoryExtensionEnabled);
8039  ~VmaRecorder();
8040 
8041  void RecordCreateAllocator(uint32_t frameIndex);
8042  void RecordDestroyAllocator(uint32_t frameIndex);
8043  void RecordCreatePool(uint32_t frameIndex,
8044  const VmaPoolCreateInfo& createInfo,
8045  VmaPool pool);
8046  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
8047  void RecordAllocateMemory(uint32_t frameIndex,
8048  const VkMemoryRequirements& vkMemReq,
8049  const VmaAllocationCreateInfo& createInfo,
8050  VmaAllocation allocation);
8051  void RecordAllocateMemoryPages(uint32_t frameIndex,
8052  const VkMemoryRequirements& vkMemReq,
8053  const VmaAllocationCreateInfo& createInfo,
8054  uint64_t allocationCount,
8055  const VmaAllocation* pAllocations);
8056  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
8057  const VkMemoryRequirements& vkMemReq,
8058  bool requiresDedicatedAllocation,
8059  bool prefersDedicatedAllocation,
8060  const VmaAllocationCreateInfo& createInfo,
8061  VmaAllocation allocation);
8062  void RecordAllocateMemoryForImage(uint32_t frameIndex,
8063  const VkMemoryRequirements& vkMemReq,
8064  bool requiresDedicatedAllocation,
8065  bool prefersDedicatedAllocation,
8066  const VmaAllocationCreateInfo& createInfo,
8067  VmaAllocation allocation);
8068  void RecordFreeMemory(uint32_t frameIndex,
8069  VmaAllocation allocation);
8070  void RecordFreeMemoryPages(uint32_t frameIndex,
8071  uint64_t allocationCount,
8072  const VmaAllocation* pAllocations);
8073  void RecordSetAllocationUserData(uint32_t frameIndex,
8074  VmaAllocation allocation,
8075  const void* pUserData);
8076  void RecordCreateLostAllocation(uint32_t frameIndex,
8077  VmaAllocation allocation);
8078  void RecordMapMemory(uint32_t frameIndex,
8079  VmaAllocation allocation);
8080  void RecordUnmapMemory(uint32_t frameIndex,
8081  VmaAllocation allocation);
8082  void RecordFlushAllocation(uint32_t frameIndex,
8083  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
8084  void RecordInvalidateAllocation(uint32_t frameIndex,
8085  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
8086  void RecordCreateBuffer(uint32_t frameIndex,
8087  const VkBufferCreateInfo& bufCreateInfo,
8088  const VmaAllocationCreateInfo& allocCreateInfo,
8089  VmaAllocation allocation);
8090  void RecordCreateImage(uint32_t frameIndex,
8091  const VkImageCreateInfo& imageCreateInfo,
8092  const VmaAllocationCreateInfo& allocCreateInfo,
8093  VmaAllocation allocation);
8094  void RecordDestroyBuffer(uint32_t frameIndex,
8095  VmaAllocation allocation);
8096  void RecordDestroyImage(uint32_t frameIndex,
8097  VmaAllocation allocation);
8098  void RecordTouchAllocation(uint32_t frameIndex,
8099  VmaAllocation allocation);
8100  void RecordGetAllocationInfo(uint32_t frameIndex,
8101  VmaAllocation allocation);
8102  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
8103  VmaPool pool);
8104  void RecordDefragmentationBegin(uint32_t frameIndex,
8105  const VmaDefragmentationInfo2& info,
8106  VmaDefragmentationContext ctx);
8107  void RecordDefragmentationEnd(uint32_t frameIndex,
8108  VmaDefragmentationContext ctx);
8109  void RecordSetPoolName(uint32_t frameIndex,
8110  VmaPool pool,
8111  const char* name);
8112 
8113 private:
8114  struct CallParams
8115  {
8116  uint32_t threadId;
8117  double time;
8118  };
8119 
8120  class UserDataString
8121  {
8122  public:
8123  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
8124  const char* GetString() const { return m_Str; }
8125 
8126  private:
8127  char m_PtrStr[17];
8128  const char* m_Str;
8129  };
8130 
8131  bool m_UseMutex;
8132  VmaRecordFlags m_Flags;
8133  FILE* m_File;
8134  VMA_MUTEX m_FileMutex;
8135  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
8136 
8137  void GetBasicParams(CallParams& outParams);
8138 
8139  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
8140  template<typename T>
8141  void PrintPointerList(uint64_t count, const T* pItems)
8142  {
8143  if(count)
8144  {
8145  fprintf(m_File, "%p", pItems[0]);
8146  for(uint64_t i = 1; i < count; ++i)
8147  {
8148  fprintf(m_File, " %p", pItems[i]);
8149  }
8150  }
8151  }
8152 
8153  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
8154  void Flush();
8155 };
8156 
8157 #endif // #if VMA_RECORDING_ENABLED
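/*
When compiled with VMA_RECORDING_ENABLED, recording is enabled per allocator
through VmaAllocatorCreateInfo::pRecordSettings (sketch; the file path is just
an example):

    VmaRecordSettings recordSettings = {};
    recordSettings.pFilePath = "vma_capture.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... fill instance, physicalDevice, device, etc. ...
    allocatorInfo.pRecordSettings = &recordSettings;
*/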
8158 
8159 /*
8160 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
8161 */
8162 class VmaAllocationObjectAllocator
8163 {
8164  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
8165 public:
8166  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
8167 
8168  template<typename... Types> VmaAllocation Allocate(Types... args);
8169  void Free(VmaAllocation hAlloc);
8170 
8171 private:
8172  VMA_MUTEX m_Mutex;
8173  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
8174 };
8175 
8176 struct VmaCurrentBudgetData
8177 {
8178  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
8179  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
8180 
8181 #if VMA_MEMORY_BUDGET
8182  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
8183  VMA_RW_MUTEX m_BudgetMutex;
8184  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
8185  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
8186  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
8187 #endif // #if VMA_MEMORY_BUDGET
8188 
8189  VmaCurrentBudgetData()
8190  {
8191  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
8192  {
8193  m_BlockBytes[heapIndex] = 0;
8194  m_AllocationBytes[heapIndex] = 0;
8195 #if VMA_MEMORY_BUDGET
8196  m_VulkanUsage[heapIndex] = 0;
8197  m_VulkanBudget[heapIndex] = 0;
8198  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
8199 #endif
8200  }
8201 
8202 #if VMA_MEMORY_BUDGET
8203  m_OperationsSinceBudgetFetch = 0;
8204 #endif
8205  }
8206 
8207  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
8208  {
8209  m_AllocationBytes[heapIndex] += allocationSize;
8210 #if VMA_MEMORY_BUDGET
8211  ++m_OperationsSinceBudgetFetch;
8212 #endif
8213  }
8214 
8215  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
8216  {
8217  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
8218  m_AllocationBytes[heapIndex] -= allocationSize;
8219 #if VMA_MEMORY_BUDGET
8220  ++m_OperationsSinceBudgetFetch;
8221 #endif
8222  }
8223 };
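/*
These counters feed the public budget query (sketch):

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);
    // budgets[heapIndex].blockBytes/allocationBytes come from the atomics above;
    // usage/budget come from VK_EXT_memory_budget when it is available.
*/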
8224 
8225 // Main allocator object.
8226 struct VmaAllocator_T
8227 {
8228  VMA_CLASS_NO_COPY(VmaAllocator_T)
8229 public:
8230  bool m_UseMutex;
8231  uint32_t m_VulkanApiVersion;
8232  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
8233  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
8234  bool m_UseExtMemoryBudget;
8235  bool m_UseAmdDeviceCoherentMemory;
8236  bool m_UseKhrBufferDeviceAddress;
8237  bool m_UseExtMemoryPriority;
8238  VkDevice m_hDevice;
8239  VkInstance m_hInstance;
8240  bool m_AllocationCallbacksSpecified;
8241  VkAllocationCallbacks m_AllocationCallbacks;
8242  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
8243  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
8244 
8245  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the heap size can be allocated from it.
8246  uint32_t m_HeapSizeLimitMask;
8247 
8248  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
8249  VkPhysicalDeviceMemoryProperties m_MemProps;
8250 
8251  // Default pools.
8252  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
8253 
8254  typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
8255  DedicatedAllocationLinkedList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
8256  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
8257 
8258  VmaCurrentBudgetData m_Budget;
8259  VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
8260 
8261  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
8262  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
8263  ~VmaAllocator_T();
8264 
8265  const VkAllocationCallbacks* GetAllocationCallbacks() const
8266  {
8267  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
8268  }
8269  const VmaVulkanFunctions& GetVulkanFunctions() const
8270  {
8271  return m_VulkanFunctions;
8272  }
8273 
8274  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
8275 
8276  VkDeviceSize GetBufferImageGranularity() const
8277  {
8278  return VMA_MAX(
8279  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
8280  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
8281  }
8282 
8283  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
8284  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
8285 
8286  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
8287  {
8288  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
8289  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
8290  }
8291  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
8292  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
8293  {
8294  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
8295  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8296  }
8297  // Minimum alignment for all allocations in specific memory type.
8298  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
8299  {
8300  return IsMemoryTypeNonCoherent(memTypeIndex) ?
8301  VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
8302  (VkDeviceSize)VMA_MIN_ALIGNMENT;
8303  }
8304 
8305  bool IsIntegratedGpu() const
8306  {
8307  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
8308  }
8309 
8310  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
8311 
8312 #if VMA_RECORDING_ENABLED
8313  VmaRecorder* GetRecorder() const { return m_pRecorder; }
8314 #endif
8315 
8316  void GetBufferMemoryRequirements(
8317  VkBuffer hBuffer,
8318  VkMemoryRequirements& memReq,
8319  bool& requiresDedicatedAllocation,
8320  bool& prefersDedicatedAllocation) const;
8321  void GetImageMemoryRequirements(
8322  VkImage hImage,
8323  VkMemoryRequirements& memReq,
8324  bool& requiresDedicatedAllocation,
8325  bool& prefersDedicatedAllocation) const;
8326 
8327  // Main allocation function.
8328  VkResult AllocateMemory(
8329  const VkMemoryRequirements& vkMemReq,
8330  bool requiresDedicatedAllocation,
8331  bool prefersDedicatedAllocation,
8332  VkBuffer dedicatedBuffer,
8333  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
8334  VkImage dedicatedImage,
8335  const VmaAllocationCreateInfo& createInfo,
8336  VmaSuballocationType suballocType,
8337  size_t allocationCount,
8338  VmaAllocation* pAllocations);
8339 
8340  // Main deallocation function.
8341  void FreeMemory(
8342  size_t allocationCount,
8343  const VmaAllocation* pAllocations);
8344 
8345  void CalculateStats(VmaStats* pStats);
8346 
8347  void GetBudget(
8348  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
8349 
8350 #if VMA_STATS_STRING_ENABLED
8351  void PrintDetailedMap(class VmaJsonWriter& json);
8352 #endif
8353 
8354  VkResult DefragmentationBegin(
8355  const VmaDefragmentationInfo2& info,
8356  VmaDefragmentationStats* pStats,
8357  VmaDefragmentationContext* pContext);
8358  VkResult DefragmentationEnd(
8359  VmaDefragmentationContext context);
8360 
8361  VkResult DefragmentationPassBegin(
8362  VmaDefragmentationPassInfo* pInfo,
8363  VmaDefragmentationContext context);
8364  VkResult DefragmentationPassEnd(
8365  VmaDefragmentationContext context);
8366 
8367  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
8368  bool TouchAllocation(VmaAllocation hAllocation);
8369 
8370  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
8371  void DestroyPool(VmaPool pool);
8372  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
8373 
8374  void SetCurrentFrameIndex(uint32_t frameIndex);
8375  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
8376 
8377  void MakePoolAllocationsLost(
8378  VmaPool hPool,
8379  size_t* pLostAllocationCount);
8380  VkResult CheckPoolCorruption(VmaPool hPool);
8381  VkResult CheckCorruption(uint32_t memoryTypeBits);
8382 
8383  void CreateLostAllocation(VmaAllocation* pAllocation);
8384 
8385  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
8386  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
8387  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
8388  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
8389  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
8390  VkResult BindVulkanBuffer(
8391  VkDeviceMemory memory,
8392  VkDeviceSize memoryOffset,
8393  VkBuffer buffer,
8394  const void* pNext);
8395  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
8396  VkResult BindVulkanImage(
8397  VkDeviceMemory memory,
8398  VkDeviceSize memoryOffset,
8399  VkImage image,
8400  const void* pNext);
8401 
8402  VkResult Map(VmaAllocation hAllocation, void** ppData);
8403  void Unmap(VmaAllocation hAllocation);
8404 
8405  VkResult BindBufferMemory(
8406  VmaAllocation hAllocation,
8407  VkDeviceSize allocationLocalOffset,
8408  VkBuffer hBuffer,
8409  const void* pNext);
8410  VkResult BindImageMemory(
8411  VmaAllocation hAllocation,
8412  VkDeviceSize allocationLocalOffset,
8413  VkImage hImage,
8414  const void* pNext);
8415 
8416  VkResult FlushOrInvalidateAllocation(
8417  VmaAllocation hAllocation,
8418  VkDeviceSize offset, VkDeviceSize size,
8419  VMA_CACHE_OPERATION op);
8420  VkResult FlushOrInvalidateAllocations(
8421  uint32_t allocationCount,
8422  const VmaAllocation* allocations,
8423  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
8424  VMA_CACHE_OPERATION op);
8425 
8426  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
8427 
8428  /*
8429  Returns a bit mask of the memory types that can support defragmentation on
8430  GPU, because they support creation of the buffer required for copy operations.
8431  */
8432  uint32_t GetGpuDefragmentationMemoryTypeBits();
8433 
8434 #if VMA_EXTERNAL_MEMORY
8435  VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
8436  {
8437  return m_TypeExternalMemoryHandleTypes[memTypeIndex];
8438  }
8439 #endif // #if VMA_EXTERNAL_MEMORY
8440 
8441 private:
8442  VkDeviceSize m_PreferredLargeHeapBlockSize;
8443 
8444  VkPhysicalDevice m_PhysicalDevice;
8445  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
8446  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
8447 #if VMA_EXTERNAL_MEMORY
8448  VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
8449 #endif // #if VMA_EXTERNAL_MEMORY
8450 
8451  VMA_RW_MUTEX m_PoolsMutex;
8452  typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
8453  // Protected by m_PoolsMutex.
8454  PoolList m_Pools;
8455  uint32_t m_NextPoolId;
8456 
8457  VmaVulkanFunctions m_VulkanFunctions;
8458 
8459  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
8460  uint32_t m_GlobalMemoryTypeBits;
8461 
8462 #if VMA_RECORDING_ENABLED
8463  VmaRecorder* m_pRecorder;
8464 #endif
8465 
8466  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
8467 
8468 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8469  void ImportVulkanFunctions_Static();
8470 #endif
8471 
8472  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
8473 
8474 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
8475  void ImportVulkanFunctions_Dynamic();
8476 #endif
8477 
8478  void ValidateVulkanFunctions();
8479 
8480  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
8481 
8482  VkResult AllocateMemoryOfType(
8483  VkDeviceSize size,
8484  VkDeviceSize alignment,
8485  bool dedicatedAllocation,
8486  VkBuffer dedicatedBuffer,
8487  VkBufferUsageFlags dedicatedBufferUsage,
8488  VkImage dedicatedImage,
8489  const VmaAllocationCreateInfo& createInfo,
8490  uint32_t memTypeIndex,
8491  VmaSuballocationType suballocType,
8492  size_t allocationCount,
8493  VmaAllocation* pAllocations);
8494 
8495  // Helper function only to be used inside AllocateDedicatedMemory.
8496  VkResult AllocateDedicatedMemoryPage(
8497  VkDeviceSize size,
8498  VmaSuballocationType suballocType,
8499  uint32_t memTypeIndex,
8500  const VkMemoryAllocateInfo& allocInfo,
8501  bool map,
8502  bool isUserDataString,
8503  void* pUserData,
8504  VmaAllocation* pAllocation);
8505 
8506  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
8507  VkResult AllocateDedicatedMemory(
8508  VkDeviceSize size,
8509  VmaSuballocationType suballocType,
8510  uint32_t memTypeIndex,
8511  bool withinBudget,
8512  bool map,
8513  bool isUserDataString,
8514  void* pUserData,
8515  float priority,
8516  VkBuffer dedicatedBuffer,
8517  VkBufferUsageFlags dedicatedBufferUsage,
8518  VkImage dedicatedImage,
8519  size_t allocationCount,
8520  VmaAllocation* pAllocations);
8521 
8522  void FreeDedicatedMemory(const VmaAllocation allocation);
8523 
8524  /*
8525  Calculates and returns a bit mask of the memory types that can support
8526  defragmentation on GPU, because they support creation of the buffer required for copy operations.
8527  */
8528  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
8529 
8530  uint32_t CalculateGlobalMemoryTypeBits() const;
8531 
8532  bool GetFlushOrInvalidateRange(
8533  VmaAllocation allocation,
8534  VkDeviceSize offset, VkDeviceSize size,
8535  VkMappedMemoryRange& outRange) const;
8536 
8537 #if VMA_MEMORY_BUDGET
8538  void UpdateVulkanBudget();
8539 #endif // #if VMA_MEMORY_BUDGET
8540 };
8541 
8542 ////////////////////////////////////////////////////////////////////////////////
8543 // Memory allocation #2 after VmaAllocator_T definition
8544 
8545 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
8546 {
8547  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
8548 }
8549 
8550 static void VmaFree(VmaAllocator hAllocator, void* ptr)
8551 {
8552  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
8553 }
8554 
8555 template<typename T>
8556 static T* VmaAllocate(VmaAllocator hAllocator)
8557 {
8558  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
8559 }
8560 
8561 template<typename T>
8562 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
8563 {
8564  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
8565 }
8566 
8567 template<typename T>
8568 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8569 {
8570  if(ptr != VMA_NULL)
8571  {
8572  ptr->~T();
8573  VmaFree(hAllocator, ptr);
8574  }
8575 }
8576 
8577 template<typename T>
8578 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
8579 {
8580  if(ptr != VMA_NULL)
8581  {
8582  for(size_t i = count; i--; )
8583  ptr[i].~T();
8584  VmaFree(hAllocator, ptr);
8585  }
8586 }
8587 
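// A minimal usage sketch of the helpers above (hypothetical type and values, not
// part of the library). VmaAllocate only acquires raw storage, so construction is
// done with placement-new, and vma_delete runs the destructor before freeing:
//
//   struct Foo { int x; explicit Foo(int x) : x(x) {} };
//   Foo* foo = new(VmaAllocate<Foo>(hAllocator)) Foo(42);
//   vma_delete(hAllocator, foo); // ~Foo(), then VmaFree()
//
// vma_delete_array mirrors this for arrays, destroying elements in reverse order.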
8588 ////////////////////////////////////////////////////////////////////////////////
8589 // VmaStringBuilder
8590 
8591 #if VMA_STATS_STRING_ENABLED
8592 
8593 class VmaStringBuilder
8594 {
8595 public:
8596  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8597  size_t GetLength() const { return m_Data.size(); }
8598  const char* GetData() const { return m_Data.data(); }
8599 
8600  void Add(char ch) { m_Data.push_back(ch); }
8601  void Add(const char* pStr);
8602  void AddNewLine() { Add('\n'); }
8603  void AddNumber(uint32_t num);
8604  void AddNumber(uint64_t num);
8605  void AddPointer(const void* ptr);
8606 
8607 private:
8608  VmaVector< char, VmaStlAllocator<char> > m_Data;
8609 };
8610 
8611 void VmaStringBuilder::Add(const char* pStr)
8612 {
8613  const size_t strLen = strlen(pStr);
8614  if(strLen > 0)
8615  {
8616  const size_t oldCount = m_Data.size();
8617  m_Data.resize(oldCount + strLen);
8618  memcpy(m_Data.data() + oldCount, pStr, strLen);
8619  }
8620 }
8621 
8622 void VmaStringBuilder::AddNumber(uint32_t num)
8623 {
8624  char buf[11];
8625  buf[10] = '\0';
8626  char *p = &buf[10];
8627  do
8628  {
8629  *--p = '0' + (num % 10);
8630  num /= 10;
8631  }
8632  while(num);
8633  Add(p);
8634 }
8635 
8636 void VmaStringBuilder::AddNumber(uint64_t num)
8637 {
8638  char buf[21];
8639  buf[20] = '\0';
8640  char *p = &buf[20];
8641  do
8642  {
8643  *--p = '0' + (num % 10);
8644  num /= 10;
8645  }
8646  while(num);
8647  Add(p);
8648 }
8649 
8650 void VmaStringBuilder::AddPointer(const void* ptr)
8651 {
8652  char buf[21];
8653  VmaPtrToStr(buf, sizeof(buf), ptr);
8654  Add(buf);
8655 }
8656 
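// A minimal sketch of how the builder is driven (hypothetical values). Note that
// m_Data is not null-terminated; consumers must pair GetData() with GetLength():
//
//   VmaStringBuilder sb(hAllocator);
//   sb.Add("Count: ");
//   sb.AddNumber(42u);  // digits are built back-to-front in a small stack buffer
//   sb.AddNewLine();
//   // GetData()/GetLength() now describe the 10 characters "Count: 42\n".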
8657 #endif // #if VMA_STATS_STRING_ENABLED
8658 
8659 ////////////////////////////////////////////////////////////////////////////////
8660 // VmaJsonWriter
8661 
8662 #if VMA_STATS_STRING_ENABLED
8663 
8664 class VmaJsonWriter
8665 {
8666  VMA_CLASS_NO_COPY(VmaJsonWriter)
8667 public:
8668  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8669  ~VmaJsonWriter();
8670 
8671  void BeginObject(bool singleLine = false);
8672  void EndObject();
8673 
8674  void BeginArray(bool singleLine = false);
8675  void EndArray();
8676 
8677  void WriteString(const char* pStr);
8678  void BeginString(const char* pStr = VMA_NULL);
8679  void ContinueString(const char* pStr);
8680  void ContinueString(uint32_t n);
8681  void ContinueString(uint64_t n);
8682  void ContinueString_Pointer(const void* ptr);
8683  void EndString(const char* pStr = VMA_NULL);
8684 
8685  void WriteNumber(uint32_t n);
8686  void WriteNumber(uint64_t n);
8687  void WriteBool(bool b);
8688  void WriteNull();
8689 
8690 private:
8691  static const char* const INDENT;
8692 
8693  enum COLLECTION_TYPE
8694  {
8695  COLLECTION_TYPE_OBJECT,
8696  COLLECTION_TYPE_ARRAY,
8697  };
8698  struct StackItem
8699  {
8700  COLLECTION_TYPE type;
8701  uint32_t valueCount;
8702  bool singleLineMode;
8703  };
8704 
8705  VmaStringBuilder& m_SB;
8706  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8707  bool m_InsideString;
8708 
8709  void BeginValue(bool isString);
8710  void WriteIndent(bool oneLess = false);
8711 };
8712 
8713 const char* const VmaJsonWriter::INDENT = " ";
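// A minimal sketch of the writer's call protocol (hypothetical values). Inside an
// object, string writes alternate between key and value; BeginValue() below asserts
// and enforces this:
//
//   VmaJsonWriter json(pAllocationCallbacks, sb);
//   json.BeginObject();
//   json.WriteString("Size"); // key
//   json.WriteNumber(256u);   // value, emitted as: "Size": 256
//   json.EndObject();
//   // sb now holds (with INDENT as defined above):
//   // {
//   //  "Size": 256
//   // }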
8714 
8715 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8716  m_SB(sb),
8717  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8718  m_InsideString(false)
8719 {
8720 }
8721 
8722 VmaJsonWriter::~VmaJsonWriter()
8723 {
8724  VMA_ASSERT(!m_InsideString);
8725  VMA_ASSERT(m_Stack.empty());
8726 }
8727 
8728 void VmaJsonWriter::BeginObject(bool singleLine)
8729 {
8730  VMA_ASSERT(!m_InsideString);
8731 
8732  BeginValue(false);
8733  m_SB.Add('{');
8734 
8735  StackItem item;
8736  item.type = COLLECTION_TYPE_OBJECT;
8737  item.valueCount = 0;
8738  item.singleLineMode = singleLine;
8739  m_Stack.push_back(item);
8740 }
8741 
8742 void VmaJsonWriter::EndObject()
8743 {
8744  VMA_ASSERT(!m_InsideString);
8745 
8746  WriteIndent(true);
8747  m_SB.Add('}');
8748 
8749  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8750  m_Stack.pop_back();
8751 }
8752 
8753 void VmaJsonWriter::BeginArray(bool singleLine)
8754 {
8755  VMA_ASSERT(!m_InsideString);
8756 
8757  BeginValue(false);
8758  m_SB.Add('[');
8759 
8760  StackItem item;
8761  item.type = COLLECTION_TYPE_ARRAY;
8762  item.valueCount = 0;
8763  item.singleLineMode = singleLine;
8764  m_Stack.push_back(item);
8765 }
8766 
8767 void VmaJsonWriter::EndArray()
8768 {
8769  VMA_ASSERT(!m_InsideString);
8770 
8771  WriteIndent(true);
8772  m_SB.Add(']');
8773 
8774  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8775  m_Stack.pop_back();
8776 }
8777 
8778 void VmaJsonWriter::WriteString(const char* pStr)
8779 {
8780  BeginString(pStr);
8781  EndString();
8782 }
8783 
8784 void VmaJsonWriter::BeginString(const char* pStr)
8785 {
8786  VMA_ASSERT(!m_InsideString);
8787 
8788  BeginValue(true);
8789  m_SB.Add('"');
8790  m_InsideString = true;
8791  if(pStr != VMA_NULL && pStr[0] != '\0')
8792  {
8793  ContinueString(pStr);
8794  }
8795 }
8796 
8797 void VmaJsonWriter::ContinueString(const char* pStr)
8798 {
8799  VMA_ASSERT(m_InsideString);
8800 
8801  const size_t strLen = strlen(pStr);
8802  for(size_t i = 0; i < strLen; ++i)
8803  {
8804  char ch = pStr[i];
8805  if(ch == '\\')
8806  {
8807  m_SB.Add("\\\\");
8808  }
8809  else if(ch == '"')
8810  {
8811  m_SB.Add("\\\"");
8812  }
8813  else if(ch >= 32)
8814  {
8815  m_SB.Add(ch);
8816  }
8817  else switch(ch)
8818  {
8819  case '\b':
8820  m_SB.Add("\\b");
8821  break;
8822  case '\f':
8823  m_SB.Add("\\f");
8824  break;
8825  case '\n':
8826  m_SB.Add("\\n");
8827  break;
8828  case '\r':
8829  m_SB.Add("\\r");
8830  break;
8831  case '\t':
8832  m_SB.Add("\\t");
8833  break;
8834  default:
8835  VMA_ASSERT(0 && "Character not currently supported.");
8836  break;
8837  }
8838  }
8839 }
8840 
8841 void VmaJsonWriter::ContinueString(uint32_t n)
8842 {
8843  VMA_ASSERT(m_InsideString);
8844  m_SB.AddNumber(n);
8845 }
8846 
8847 void VmaJsonWriter::ContinueString(uint64_t n)
8848 {
8849  VMA_ASSERT(m_InsideString);
8850  m_SB.AddNumber(n);
8851 }
8852 
8853 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8854 {
8855  VMA_ASSERT(m_InsideString);
8856  m_SB.AddPointer(ptr);
8857 }
8858 
8859 void VmaJsonWriter::EndString(const char* pStr)
8860 {
8861  VMA_ASSERT(m_InsideString);
8862  if(pStr != VMA_NULL && pStr[0] != '\0')
8863  {
8864  ContinueString(pStr);
8865  }
8866  m_SB.Add('"');
8867  m_InsideString = false;
8868 }
8869 
8870 void VmaJsonWriter::WriteNumber(uint32_t n)
8871 {
8872  VMA_ASSERT(!m_InsideString);
8873  BeginValue(false);
8874  m_SB.AddNumber(n);
8875 }
8876 
8877 void VmaJsonWriter::WriteNumber(uint64_t n)
8878 {
8879  VMA_ASSERT(!m_InsideString);
8880  BeginValue(false);
8881  m_SB.AddNumber(n);
8882 }
8883 
8884 void VmaJsonWriter::WriteBool(bool b)
8885 {
8886  VMA_ASSERT(!m_InsideString);
8887  BeginValue(false);
8888  m_SB.Add(b ? "true" : "false");
8889 }
8890 
8891 void VmaJsonWriter::WriteNull()
8892 {
8893  VMA_ASSERT(!m_InsideString);
8894  BeginValue(false);
8895  m_SB.Add("null");
8896 }
8897 
8898 void VmaJsonWriter::BeginValue(bool isString)
8899 {
8900  if(!m_Stack.empty())
8901  {
8902  StackItem& currItem = m_Stack.back();
8903  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8904  currItem.valueCount % 2 == 0)
8905  {
8906  VMA_ASSERT(isString);
8907  }
8908 
8909  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8910  currItem.valueCount % 2 != 0)
8911  {
8912  m_SB.Add(": ");
8913  }
8914  else if(currItem.valueCount > 0)
8915  {
8916  m_SB.Add(", ");
8917  WriteIndent();
8918  }
8919  else
8920  {
8921  WriteIndent();
8922  }
8923  ++currItem.valueCount;
8924  }
8925 }
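// Key/value alternation in BeginValue(), spelled out: inside an object an even
// valueCount means the next value must be a key (hence VMA_ASSERT(isString)), an
// odd one means it is the value belonging to the preceding key, which is why only
// ": " is emitted between them while ", " plus indentation separates pairs.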
8926 
8927 void VmaJsonWriter::WriteIndent(bool oneLess)
8928 {
8929  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8930  {
8931  m_SB.AddNewLine();
8932 
8933  size_t count = m_Stack.size();
8934  if(count > 0 && oneLess)
8935  {
8936  --count;
8937  }
8938  for(size_t i = 0; i < count; ++i)
8939  {
8940  m_SB.Add(INDENT);
8941  }
8942  }
8943 }
8944 
8945 #endif // #if VMA_STATS_STRING_ENABLED
8946 
8947 ////////////////////////////////////////////////////////////////////////////////
8948 
8949 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8950 {
8951  if(IsUserDataString())
8952  {
8953  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8954 
8955  FreeUserDataString(hAllocator);
8956 
8957  if(pUserData != VMA_NULL)
8958  {
8959  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8960  }
8961  }
8962  else
8963  {
8964  m_pUserData = pUserData;
8965  }
8966 }
8967 
8968 void VmaAllocation_T::ChangeBlockAllocation(
8969  VmaAllocator hAllocator,
8970  VmaDeviceMemoryBlock* block,
8971  VkDeviceSize offset)
8972 {
8973  VMA_ASSERT(block != VMA_NULL);
8974  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8975 
8976  // Move mapping reference counter from old block to new block.
8977  if(block != m_BlockAllocation.m_Block)
8978  {
8979  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8980  if(IsPersistentMap())
8981  ++mapRefCount;
8982  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8983  block->Map(hAllocator, mapRefCount, VMA_NULL);
8984  }
8985 
8986  m_BlockAllocation.m_Block = block;
8987  m_BlockAllocation.m_Offset = offset;
8988 }
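// Why the map reference count travels with the allocation: each mapping of this
// allocation (plus one extra reference if it is persistently mapped) keeps the
// owning block's VkDeviceMemory mapped. When defragmentation moves the allocation
// to another block, those references must be released on the old block and
// re-acquired on the new one, otherwise the old block would stay mapped forever.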
8989 
8990 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8991 {
8992  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8993  m_BlockAllocation.m_Offset = newOffset;
8994 }
8995 
8996 VkDeviceSize VmaAllocation_T::GetOffset() const
8997 {
8998  switch(m_Type)
8999  {
9000  case ALLOCATION_TYPE_BLOCK:
9001  return m_BlockAllocation.m_Offset;
9002  case ALLOCATION_TYPE_DEDICATED:
9003  return 0;
9004  default:
9005  VMA_ASSERT(0);
9006  return 0;
9007  }
9008 }
9009 
9010 VkDeviceMemory VmaAllocation_T::GetMemory() const
9011 {
9012  switch(m_Type)
9013  {
9014  case ALLOCATION_TYPE_BLOCK:
9015  return m_BlockAllocation.m_Block->GetDeviceMemory();
9016  case ALLOCATION_TYPE_DEDICATED:
9017  return m_DedicatedAllocation.m_hMemory;
9018  default:
9019  VMA_ASSERT(0);
9020  return VK_NULL_HANDLE;
9021  }
9022 }
9023 
9024 void* VmaAllocation_T::GetMappedData() const
9025 {
9026  switch(m_Type)
9027  {
9028  case ALLOCATION_TYPE_BLOCK:
9029  if(m_MapCount != 0)
9030  {
9031  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
9032  VMA_ASSERT(pBlockData != VMA_NULL);
9033  return (char*)pBlockData + m_BlockAllocation.m_Offset;
9034  }
9035  else
9036  {
9037  return VMA_NULL;
9038  }
9039  break;
9040  case ALLOCATION_TYPE_DEDICATED:
9041  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
9042  return m_DedicatedAllocation.m_pMappedData;
9043  default:
9044  VMA_ASSERT(0);
9045  return VMA_NULL;
9046  }
9047 }
9048 
9049 bool VmaAllocation_T::CanBecomeLost() const
9050 {
9051  switch(m_Type)
9052  {
9053  case ALLOCATION_TYPE_BLOCK:
9054  return m_BlockAllocation.m_CanBecomeLost;
9055  case ALLOCATION_TYPE_DEDICATED:
9056  return false;
9057  default:
9058  VMA_ASSERT(0);
9059  return false;
9060  }
9061 }
9062 
9063 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9064 {
9065  VMA_ASSERT(CanBecomeLost());
9066 
9067  /*
9068  Warning: This is a carefully designed algorithm.
9069  Do not modify unless you really know what you're doing :)
9070  */
9071  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
9072  for(;;)
9073  {
9074  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
9075  {
9076  VMA_ASSERT(0);
9077  return false;
9078  }
9079  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
9080  {
9081  return false;
9082  }
9083  else // Last use time earlier than current time.
9084  {
9085  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
9086  {
9087  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
9088  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
9089  return true;
9090  }
9091  }
9092  }
9093 }
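// The loop above is a lock-free compare-and-swap pattern. A simplified model of
// it, assuming the frame index lived in a plain std::atomic<uint32_t> (sketch
// only, not the library's code):
//
//   uint32_t seen = lastUseFrameIndex.load();
//   for(;;)
//   {
//       if(seen == VMA_FRAME_INDEX_LOST) return false;                 // already lost
//       if(seen + frameInUseCount >= currentFrameIndex) return false;  // still in use
//       if(lastUseFrameIndex.compare_exchange_weak(seen, VMA_FRAME_INDEX_LOST))
//           return true; // this thread won the race: allocation is now lost
//       // on failure 'seen' has been reloaded with the current value; retry
//   }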
9094 
9095 #if VMA_STATS_STRING_ENABLED
9096 
9097 // Correspond to values of enum VmaSuballocationType.
9098 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
9099  "FREE",
9100  "UNKNOWN",
9101  "BUFFER",
9102  "IMAGE_UNKNOWN",
9103  "IMAGE_LINEAR",
9104  "IMAGE_OPTIMAL",
9105 };
9106 
9107 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
9108 {
9109  json.WriteString("Type");
9110  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
9111 
9112  json.WriteString("Size");
9113  json.WriteNumber(m_Size);
9114 
9115  if(m_pUserData != VMA_NULL)
9116  {
9117  json.WriteString("UserData");
9118  if(IsUserDataString())
9119  {
9120  json.WriteString((const char*)m_pUserData);
9121  }
9122  else
9123  {
9124  json.BeginString();
9125  json.ContinueString_Pointer(m_pUserData);
9126  json.EndString();
9127  }
9128  }
9129 
9130  json.WriteString("CreationFrameIndex");
9131  json.WriteNumber(m_CreationFrameIndex);
9132 
9133  json.WriteString("LastUseFrameIndex");
9134  json.WriteNumber(GetLastUseFrameIndex());
9135 
9136  if(m_BufferImageUsage != 0)
9137  {
9138  json.WriteString("Usage");
9139  json.WriteNumber(m_BufferImageUsage);
9140  }
9141 }
9142 
9143 #endif
9144 
9145 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
9146 {
9147  VMA_ASSERT(IsUserDataString());
9148  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
9149  m_pUserData = VMA_NULL;
9150 }
9151 
9152 void VmaAllocation_T::BlockAllocMap()
9153 {
9154  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
9155 
9156  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
9157  {
9158  ++m_MapCount;
9159  }
9160  else
9161  {
9162  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
9163  }
9164 }
9165 
9166 void VmaAllocation_T::BlockAllocUnmap()
9167 {
9168  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
9169 
9170  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
9171  {
9172  --m_MapCount;
9173  }
9174  else
9175  {
9176  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
9177  }
9178 }
9179 
9180 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
9181 {
9182  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
9183 
9184  if(m_MapCount != 0)
9185  {
9186  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
9187  {
9188  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
9189  *ppData = m_DedicatedAllocation.m_pMappedData;
9190  ++m_MapCount;
9191  return VK_SUCCESS;
9192  }
9193  else
9194  {
9195  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
9196  return VK_ERROR_MEMORY_MAP_FAILED;
9197  }
9198  }
9199  else
9200  {
9201  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9202  hAllocator->m_hDevice,
9203  m_DedicatedAllocation.m_hMemory,
9204  0, // offset
9205  VK_WHOLE_SIZE,
9206  0, // flags
9207  ppData);
9208  if(result == VK_SUCCESS)
9209  {
9210  m_DedicatedAllocation.m_pMappedData = *ppData;
9211  m_MapCount = 1;
9212  }
9213  return result;
9214  }
9215 }
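// Mapping of dedicated allocations is reference-counted: only the first Map() call
// invokes vkMapMemory (once, for VK_WHOLE_SIZE), later calls return the cached
// pointer, and the low 7 bits of m_MapCount hold the counter. A sketch
// (hypothetical handles):
//
//   void* p1; alloc->DedicatedAllocMap(hAllocator, &p1); // vkMapMemory, count = 1
//   void* p2; alloc->DedicatedAllocMap(hAllocator, &p2); // p2 == p1,    count = 2
//   alloc->DedicatedAllocUnmap(hAllocator);              // count = 1, still mapped
//   alloc->DedicatedAllocUnmap(hAllocator);              // count = 0, vkUnmapMemory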
9216 
9217 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
9218 {
9219  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
9220 
9221  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
9222  {
9223  --m_MapCount;
9224  if(m_MapCount == 0)
9225  {
9226  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
9227  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
9228  hAllocator->m_hDevice,
9229  m_DedicatedAllocation.m_hMemory);
9230  }
9231  }
9232  else
9233  {
9234  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
9235  }
9236 }
9237 
9238 #if VMA_STATS_STRING_ENABLED
9239 
9240 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
9241 {
9242  json.BeginObject();
9243 
9244  json.WriteString("Blocks");
9245  json.WriteNumber(stat.blockCount);
9246 
9247  json.WriteString("Allocations");
9248  json.WriteNumber(stat.allocationCount);
9249 
9250  json.WriteString("UnusedRanges");
9251  json.WriteNumber(stat.unusedRangeCount);
9252 
9253  json.WriteString("UsedBytes");
9254  json.WriteNumber(stat.usedBytes);
9255 
9256  json.WriteString("UnusedBytes");
9257  json.WriteNumber(stat.unusedBytes);
9258 
9259  if(stat.allocationCount > 1)
9260  {
9261  json.WriteString("AllocationSize");
9262  json.BeginObject(true);
9263  json.WriteString("Min");
9264  json.WriteNumber(stat.allocationSizeMin);
9265  json.WriteString("Avg");
9266  json.WriteNumber(stat.allocationSizeAvg);
9267  json.WriteString("Max");
9268  json.WriteNumber(stat.allocationSizeMax);
9269  json.EndObject();
9270  }
9271 
9272  if(stat.unusedRangeCount > 1)
9273  {
9274  json.WriteString("UnusedRangeSize");
9275  json.BeginObject(true);
9276  json.WriteString("Min");
9277  json.WriteNumber(stat.unusedRangeSizeMin);
9278  json.WriteString("Avg");
9279  json.WriteNumber(stat.unusedRangeSizeAvg);
9280  json.WriteString("Max");
9281  json.WriteNumber(stat.unusedRangeSizeMax);
9282  json.EndObject();
9283  }
9284 
9285  json.EndObject();
9286 }
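// For reference, a fragment this function could emit (values and exact whitespace
// are illustrative only):
//
//   {
//    "Blocks": 2, "Allocations": 10, "UnusedRanges": 3,
//    "UsedBytes": 1048576, "UnusedBytes": 65536,
//    "AllocationSize": { "Min": 256, "Avg": 104857, "Max": 524288 }
//   }
//
// The Min/Avg/Max sub-objects are skipped when there are fewer than two entries,
// since a single value would just repeat three times.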
9287 
9288 #endif // #if VMA_STATS_STRING_ENABLED
9289 
9290 struct VmaSuballocationItemSizeLess
9291 {
9292  bool operator()(
9293  const VmaSuballocationList::iterator lhs,
9294  const VmaSuballocationList::iterator rhs) const
9295  {
9296  return lhs->size < rhs->size;
9297  }
9298  bool operator()(
9299  const VmaSuballocationList::iterator lhs,
9300  VkDeviceSize rhsSize) const
9301  {
9302  return lhs->size < rhsSize;
9303  }
9304 };
9305 
9306 
9307 ////////////////////////////////////////////////////////////////////////////////
9308 // class VmaBlockMetadata
9309 
9310 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
9311  m_Size(0),
9312  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
9313 {
9314 }
9315 
9316 #if VMA_STATS_STRING_ENABLED
9317 
9318 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
9319  VkDeviceSize unusedBytes,
9320  size_t allocationCount,
9321  size_t unusedRangeCount) const
9322 {
9323  json.BeginObject();
9324 
9325  json.WriteString("TotalBytes");
9326  json.WriteNumber(GetSize());
9327 
9328  json.WriteString("UnusedBytes");
9329  json.WriteNumber(unusedBytes);
9330 
9331  json.WriteString("Allocations");
9332  json.WriteNumber((uint64_t)allocationCount);
9333 
9334  json.WriteString("UnusedRanges");
9335  json.WriteNumber((uint64_t)unusedRangeCount);
9336 
9337  json.WriteString("Suballocations");
9338  json.BeginArray();
9339 }
9340 
9341 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
9342  VkDeviceSize offset,
9343  VmaAllocation hAllocation) const
9344 {
9345  json.BeginObject(true);
9346 
9347  json.WriteString("Offset");
9348  json.WriteNumber(offset);
9349 
9350  hAllocation->PrintParameters(json);
9351 
9352  json.EndObject();
9353 }
9354 
9355 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
9356  VkDeviceSize offset,
9357  VkDeviceSize size) const
9358 {
9359  json.BeginObject(true);
9360 
9361  json.WriteString("Offset");
9362  json.WriteNumber(offset);
9363 
9364  json.WriteString("Type");
9365  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
9366 
9367  json.WriteString("Size");
9368  json.WriteNumber(size);
9369 
9370  json.EndObject();
9371 }
9372 
9373 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
9374 {
9375  json.EndArray();
9376  json.EndObject();
9377 }
9378 
9379 #endif // #if VMA_STATS_STRING_ENABLED
9380 
9381 ////////////////////////////////////////////////////////////////////////////////
9382 // class VmaBlockMetadata_Generic
9383 
9384 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
9385  VmaBlockMetadata(hAllocator),
9386  m_FreeCount(0),
9387  m_SumFreeSize(0),
9388  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9389  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
9390 {
9391 }
9392 
9393 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
9394 {
9395 }
9396 
9397 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
9398 {
9399  VmaBlockMetadata::Init(size);
9400 
9401  m_FreeCount = 1;
9402  m_SumFreeSize = size;
9403 
9404  VmaSuballocation suballoc = {};
9405  suballoc.offset = 0;
9406  suballoc.size = size;
9407  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9408  suballoc.hAllocation = VK_NULL_HANDLE;
9409 
9410  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9411  m_Suballocations.push_back(suballoc);
9412  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
9413  --suballocItem;
9414  m_FreeSuballocationsBySize.push_back(suballocItem);
9415 }
9416 
9417 bool VmaBlockMetadata_Generic::Validate() const
9418 {
9419  VMA_VALIDATE(!m_Suballocations.empty());
9420 
9421  // Expected offset of new suballocation as calculated from previous ones.
9422  VkDeviceSize calculatedOffset = 0;
9423  // Expected number of free suballocations as calculated from traversing their list.
9424  uint32_t calculatedFreeCount = 0;
9425  // Expected sum size of free suballocations as calculated from traversing their list.
9426  VkDeviceSize calculatedSumFreeSize = 0;
9427  // Expected number of free suballocations that should be registered in
9428  // m_FreeSuballocationsBySize calculated from traversing their list.
9429  size_t freeSuballocationsToRegister = 0;
9430  // True if previous visited suballocation was free.
9431  bool prevFree = false;
9432 
9433  for(const auto& subAlloc : m_Suballocations)
9434  {
9435  // Actual offset of this suballocation doesn't match expected one.
9436  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
9437 
9438  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
9439  // Two adjacent free suballocations are invalid. They should be merged.
9440  VMA_VALIDATE(!prevFree || !currFree);
9441 
9442  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
9443 
9444  if(currFree)
9445  {
9446  calculatedSumFreeSize += subAlloc.size;
9447  ++calculatedFreeCount;
9448  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9449  {
9450  ++freeSuballocationsToRegister;
9451  }
9452 
9453  // Margin required between allocations - every free space must be at least that large.
9454  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
9455  }
9456  else
9457  {
9458  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
9459  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
9460 
9461  // Margin required between allocations - previous allocation must be free.
9462  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
9463  }
9464 
9465  calculatedOffset += subAlloc.size;
9466  prevFree = currFree;
9467  }
9468 
9469  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
9470  // match expected one.
9471  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
9472 
9473  VkDeviceSize lastSize = 0;
9474  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
9475  {
9476  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
9477 
9478  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
9479  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9480  // They must be sorted by size ascending.
9481  VMA_VALIDATE(suballocItem->size >= lastSize);
9482 
9483  lastSize = suballocItem->size;
9484  }
9485 
9486  // Check if totals match calculated values.
9487  VMA_VALIDATE(ValidateFreeSuballocationList());
9488  VMA_VALIDATE(calculatedOffset == GetSize());
9489  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
9490  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
9491 
9492  return true;
9493 }
9494 
9495 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
9496 {
9497  if(!m_FreeSuballocationsBySize.empty())
9498  {
9499  return m_FreeSuballocationsBySize.back()->size;
9500  }
9501  else
9502  {
9503  return 0;
9504  }
9505 }
9506 
9507 bool VmaBlockMetadata_Generic::IsEmpty() const
9508 {
9509  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
9510 }
9511 
9512 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9513 {
9514  outInfo.blockCount = 1;
9515 
9516  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9517  outInfo.allocationCount = rangeCount - m_FreeCount;
9518  outInfo.unusedRangeCount = m_FreeCount;
9519 
9520  outInfo.unusedBytes = m_SumFreeSize;
9521  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
9522 
9523  outInfo.allocationSizeMin = UINT64_MAX;
9524  outInfo.allocationSizeMax = 0;
9525  outInfo.unusedRangeSizeMin = UINT64_MAX;
9526  outInfo.unusedRangeSizeMax = 0;
9527 
9528  for(const auto& suballoc : m_Suballocations)
9529  {
9530  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9531  {
9532  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9533  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9534  }
9535  else
9536  {
9537  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
9538  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
9539  }
9540  }
9541 }
9542 
9543 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
9544 {
9545  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9546 
9547  inoutStats.size += GetSize();
9548  inoutStats.unusedSize += m_SumFreeSize;
9549  inoutStats.allocationCount += rangeCount - m_FreeCount;
9550  inoutStats.unusedRangeCount += m_FreeCount;
9551  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9552 }
9553 
9554 #if VMA_STATS_STRING_ENABLED
9555 
9556 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
9557 {
9558  PrintDetailedMap_Begin(json,
9559  m_SumFreeSize, // unusedBytes
9560  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
9561  m_FreeCount); // unusedRangeCount
9562 
9563  size_t i = 0;
9564  for(const auto& suballoc : m_Suballocations)
9565  {
9566  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9567  {
9568  PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
9569  }
9570  else
9571  {
9572  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9573  }
9574  }
9575 
9576  PrintDetailedMap_End(json);
9577 }
9578 
9579 #endif // #if VMA_STATS_STRING_ENABLED
9580 
9581 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9582  uint32_t currentFrameIndex,
9583  uint32_t frameInUseCount,
9584  VkDeviceSize bufferImageGranularity,
9585  VkDeviceSize allocSize,
9586  VkDeviceSize allocAlignment,
9587  bool upperAddress,
9588  VmaSuballocationType allocType,
9589  bool canMakeOtherLost,
9590  uint32_t strategy,
9591  VmaAllocationRequest* pAllocationRequest)
9592 {
9593  VMA_ASSERT(allocSize > 0);
9594  VMA_ASSERT(!upperAddress);
9595  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9596  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9597  VMA_HEAVY_ASSERT(Validate());
9598 
9599  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9600 
9601  // There is not enough total free space in this block to fulfill the request: Early return.
9602  if(canMakeOtherLost == false &&
9603  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9604  {
9605  return false;
9606  }
9607 
9608  // New algorithm, efficiently searching freeSuballocationsBySize.
9609  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9610  if(freeSuballocCount > 0)
9611  {
9612  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
9613  {
9614  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9615  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9616  m_FreeSuballocationsBySize.data(),
9617  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9618  allocSize + 2 * VMA_DEBUG_MARGIN,
9619  VmaSuballocationItemSizeLess());
9620  size_t index = it - m_FreeSuballocationsBySize.data();
9621  for(; index < freeSuballocCount; ++index)
9622  {
9623  if(CheckAllocation(
9624  currentFrameIndex,
9625  frameInUseCount,
9626  bufferImageGranularity,
9627  allocSize,
9628  allocAlignment,
9629  allocType,
9630  m_FreeSuballocationsBySize[index],
9631  false, // canMakeOtherLost
9632  &pAllocationRequest->offset,
9633  &pAllocationRequest->itemsToMakeLostCount,
9634  &pAllocationRequest->sumFreeSize,
9635  &pAllocationRequest->sumItemSize))
9636  {
9637  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9638  return true;
9639  }
9640  }
9641  }
9642  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9643  {
9644  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9645  it != m_Suballocations.end();
9646  ++it)
9647  {
9648  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9649  currentFrameIndex,
9650  frameInUseCount,
9651  bufferImageGranularity,
9652  allocSize,
9653  allocAlignment,
9654  allocType,
9655  it,
9656  false, // canMakeOtherLost
9657  &pAllocationRequest->offset,
9658  &pAllocationRequest->itemsToMakeLostCount,
9659  &pAllocationRequest->sumFreeSize,
9660  &pAllocationRequest->sumItemSize))
9661  {
9662  pAllocationRequest->item = it;
9663  return true;
9664  }
9665  }
9666  }
9667  else // WORST_FIT, FIRST_FIT
9668  {
9669  // Search starting from biggest suballocations.
9670  for(size_t index = freeSuballocCount; index--; )
9671  {
9672  if(CheckAllocation(
9673  currentFrameIndex,
9674  frameInUseCount,
9675  bufferImageGranularity,
9676  allocSize,
9677  allocAlignment,
9678  allocType,
9679  m_FreeSuballocationsBySize[index],
9680  false, // canMakeOtherLost
9681  &pAllocationRequest->offset,
9682  &pAllocationRequest->itemsToMakeLostCount,
9683  &pAllocationRequest->sumFreeSize,
9684  &pAllocationRequest->sumItemSize))
9685  {
9686  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9687  return true;
9688  }
9689  }
9690  }
9691  }
9692 
9693  if(canMakeOtherLost)
9694  {
9695  // Brute-force algorithm. TODO: Come up with something better.
9696 
9697  bool found = false;
9698  VmaAllocationRequest tmpAllocRequest = {};
9699  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9700  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9701  suballocIt != m_Suballocations.end();
9702  ++suballocIt)
9703  {
9704  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9705  suballocIt->hAllocation->CanBecomeLost())
9706  {
9707  if(CheckAllocation(
9708  currentFrameIndex,
9709  frameInUseCount,
9710  bufferImageGranularity,
9711  allocSize,
9712  allocAlignment,
9713  allocType,
9714  suballocIt,
9715  canMakeOtherLost,
9716  &tmpAllocRequest.offset,
9717  &tmpAllocRequest.itemsToMakeLostCount,
9718  &tmpAllocRequest.sumFreeSize,
9719  &tmpAllocRequest.sumItemSize))
9720  {
9721  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
9722  {
9723  *pAllocationRequest = tmpAllocRequest;
9724  pAllocationRequest->item = suballocIt;
9725  break;
9726  }
9727  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9728  {
9729  *pAllocationRequest = tmpAllocRequest;
9730  pAllocationRequest->item = suballocIt;
9731  found = true;
9732  }
9733  }
9734  }
9735  }
9736 
9737  return found;
9738  }
9739 
9740  return false;
9741 }
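// Note on the BEST_FIT path above: m_FreeSuballocationsBySize is kept sorted by
// size ascending, so VmaBinaryFindFirstNotLess behaves like std::lower_bound and
// lands on the smallest free range that can hold the request plus both debug
// margins; the linear scan afterwards only skips candidates rejected for alignment
// or granularity. A rough standard-library equivalent (sketch, assuming <algorithm>):
//
//   auto it = std::lower_bound(bySize.begin(), bySize.end(),
//       allocSize + 2 * VMA_DEBUG_MARGIN,
//       [](VmaSuballocationList::iterator lhs, VkDeviceSize rhsSize)
//       { return lhs->size < rhsSize; });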
9742 
9743 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9744  uint32_t currentFrameIndex,
9745  uint32_t frameInUseCount,
9746  VmaAllocationRequest* pAllocationRequest)
9747 {
9748  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9749 
9750  while(pAllocationRequest->itemsToMakeLostCount > 0)
9751  {
9752  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9753  {
9754  ++pAllocationRequest->item;
9755  }
9756  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9757  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9758  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9759  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9760  {
9761  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9762  --pAllocationRequest->itemsToMakeLostCount;
9763  }
9764  else
9765  {
9766  return false;
9767  }
9768  }
9769 
9770  VMA_HEAVY_ASSERT(Validate());
9771  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9772  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9773 
9774  return true;
9775 }
9776 
9777 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9778 {
9779  uint32_t lostAllocationCount = 0;
9780  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9781  it != m_Suballocations.end();
9782  ++it)
9783  {
9784  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9785  it->hAllocation->CanBecomeLost() &&
9786  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9787  {
9788  it = FreeSuballocation(it);
9789  ++lostAllocationCount;
9790  }
9791  }
9792  return lostAllocationCount;
9793 }
9794 
9795 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9796 {
9797  for(auto& suballoc : m_Suballocations)
9798  {
9799  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9800  {
9801  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9802  {
9803  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9804  return VK_ERROR_VALIDATION_FAILED_EXT;
9805  }
9806  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9807  {
9808  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9809  return VK_ERROR_VALIDATION_FAILED_EXT;
9810  }
9811  }
9812  }
9813 
9814  return VK_SUCCESS;
9815 }
9816 
9817 void VmaBlockMetadata_Generic::Alloc(
9818  const VmaAllocationRequest& request,
9819  VmaSuballocationType type,
9820  VkDeviceSize allocSize,
9821  VmaAllocation hAllocation)
9822 {
9823  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9824  VMA_ASSERT(request.item != m_Suballocations.end());
9825  VmaSuballocation& suballoc = *request.item;
9826  // Given suballocation is a free block.
9827  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9828  // Given offset is inside this suballocation.
9829  VMA_ASSERT(request.offset >= suballoc.offset);
9830  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9831  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9832  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9833 
9834  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9835  // it to become used.
9836  UnregisterFreeSuballocation(request.item);
9837 
9838  suballoc.offset = request.offset;
9839  suballoc.size = allocSize;
9840  suballoc.type = type;
9841  suballoc.hAllocation = hAllocation;
9842 
9843  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9844  if(paddingEnd)
9845  {
9846  VmaSuballocation paddingSuballoc = {};
9847  paddingSuballoc.offset = request.offset + allocSize;
9848  paddingSuballoc.size = paddingEnd;
9849  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9850  VmaSuballocationList::iterator next = request.item;
9851  ++next;
9852  const VmaSuballocationList::iterator paddingEndItem =
9853  m_Suballocations.insert(next, paddingSuballoc);
9854  RegisterFreeSuballocation(paddingEndItem);
9855  }
9856 
9857  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9858  if(paddingBegin)
9859  {
9860  VmaSuballocation paddingSuballoc = {};
9861  paddingSuballoc.offset = request.offset - paddingBegin;
9862  paddingSuballoc.size = paddingBegin;
9863  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9864  const VmaSuballocationList::iterator paddingBeginItem =
9865  m_Suballocations.insert(request.item, paddingSuballoc);
9866  RegisterFreeSuballocation(paddingBeginItem);
9867  }
9868 
9869  // Update totals.
9870  m_FreeCount = m_FreeCount - 1;
9871  if(paddingBegin > 0)
9872  {
9873  ++m_FreeCount;
9874  }
9875  if(paddingEnd > 0)
9876  {
9877  ++m_FreeCount;
9878  }
9879  m_SumFreeSize -= allocSize;
9880 }
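// The split performed above, pictured for one free range (offsets grow rightward):
//
//   before:  |<---------------- free suballocation ----------------->|
//   after:   |<- paddingBegin ->|<-- allocation -->|<-- paddingEnd ->|
//                 (new FREE)          (used)            (new FREE)
//
// m_FreeCount loses the consumed range and gains one entry per non-empty padding
// (net change -1, 0, or +1), while m_SumFreeSize shrinks by exactly allocSize
// because both paddings stay free.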
9881 
9882 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9883 {
9884  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9885  suballocItem != m_Suballocations.end();
9886  ++suballocItem)
9887  {
9888  VmaSuballocation& suballoc = *suballocItem;
9889  if(suballoc.hAllocation == allocation)
9890  {
9891  FreeSuballocation(suballocItem);
9892  VMA_HEAVY_ASSERT(Validate());
9893  return;
9894  }
9895  }
9896  VMA_ASSERT(0 && "Not found!");
9897 }
9898 
9899 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9900 {
9901  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9902  suballocItem != m_Suballocations.end();
9903  ++suballocItem)
9904  {
9905  VmaSuballocation& suballoc = *suballocItem;
9906  if(suballoc.offset == offset)
9907  {
9908  FreeSuballocation(suballocItem);
9909  return;
9910  }
9911  }
9912  VMA_ASSERT(0 && "Not found!");
9913 }
9914 
9915 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9916 {
9917  VkDeviceSize lastSize = 0;
9918  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9919  {
9920  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9921 
9922  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9923  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9924  VMA_VALIDATE(it->size >= lastSize);
9925  lastSize = it->size;
9926  }
9927  return true;
9928 }
9929 
9930 bool VmaBlockMetadata_Generic::CheckAllocation(
9931  uint32_t currentFrameIndex,
9932  uint32_t frameInUseCount,
9933  VkDeviceSize bufferImageGranularity,
9934  VkDeviceSize allocSize,
9935  VkDeviceSize allocAlignment,
9936  VmaSuballocationType allocType,
9937  VmaSuballocationList::const_iterator suballocItem,
9938  bool canMakeOtherLost,
9939  VkDeviceSize* pOffset,
9940  size_t* itemsToMakeLostCount,
9941  VkDeviceSize* pSumFreeSize,
9942  VkDeviceSize* pSumItemSize) const
9943 {
9944  VMA_ASSERT(allocSize > 0);
9945  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9946  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9947  VMA_ASSERT(pOffset != VMA_NULL);
9948 
9949  *itemsToMakeLostCount = 0;
9950  *pSumFreeSize = 0;
9951  *pSumItemSize = 0;
9952 
9953  if(canMakeOtherLost)
9954  {
9955  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9956  {
9957  *pSumFreeSize = suballocItem->size;
9958  }
9959  else
9960  {
9961  if(suballocItem->hAllocation->CanBecomeLost() &&
9962  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9963  {
9964  ++*itemsToMakeLostCount;
9965  *pSumItemSize = suballocItem->size;
9966  }
9967  else
9968  {
9969  return false;
9970  }
9971  }
9972 
9973  // Remaining size is too small for this request: Early return.
9974  if(GetSize() - suballocItem->offset < allocSize)
9975  {
9976  return false;
9977  }
9978 
9979  // Start from offset equal to beginning of this suballocation.
9980  *pOffset = suballocItem->offset;
9981 
9982  // Apply VMA_DEBUG_MARGIN at the beginning.
9983  if(VMA_DEBUG_MARGIN > 0)
9984  {
9985  *pOffset += VMA_DEBUG_MARGIN;
9986  }
9987 
9988  // Apply alignment.
9989  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9990 
9991  // Check previous suballocations for BufferImageGranularity conflicts.
9992  // Make bigger alignment if necessary.
9993  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
9994  {
9995  bool bufferImageGranularityConflict = false;
9996  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9997  while(prevSuballocItem != m_Suballocations.cbegin())
9998  {
9999  --prevSuballocItem;
10000  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
10001  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
10002  {
10003  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10004  {
10005  bufferImageGranularityConflict = true;
10006  break;
10007  }
10008  }
10009  else
10010  // Already on previous page.
10011  break;
10012  }
10013  if(bufferImageGranularityConflict)
10014  {
10015  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
10016  }
10017  }
10018 
10019  // Now that we have final *pOffset, check if we are past suballocItem.
10020  // If yes, return false - this function should be called for another suballocItem as starting point.
10021  if(*pOffset >= suballocItem->offset + suballocItem->size)
10022  {
10023  return false;
10024  }
10025 
10026  // Calculate padding at the beginning based on current offset.
10027  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
10028 
10029  // Calculate required margin at the end.
10030  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
10031 
10032  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
10033  // Another early return check.
10034  if(suballocItem->offset + totalSize > GetSize())
10035  {
10036  return false;
10037  }
10038 
10039  // Advance lastSuballocItem until desired size is reached.
10040  // Update itemsToMakeLostCount.
10041  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
10042  if(totalSize > suballocItem->size)
10043  {
10044  VkDeviceSize remainingSize = totalSize - suballocItem->size;
10045  while(remainingSize > 0)
10046  {
10047  ++lastSuballocItem;
10048  if(lastSuballocItem == m_Suballocations.cend())
10049  {
10050  return false;
10051  }
10052  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
10053  {
10054  *pSumFreeSize += lastSuballocItem->size;
10055  }
10056  else
10057  {
10058  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
10059  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
10060  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10061  {
10062  ++*itemsToMakeLostCount;
10063  *pSumItemSize += lastSuballocItem->size;
10064  }
10065  else
10066  {
10067  return false;
10068  }
10069  }
10070  remainingSize = (lastSuballocItem->size < remainingSize) ?
10071  remainingSize - lastSuballocItem->size : 0;
10072  }
10073  }
10074 
10075  // Check next suballocations for BufferImageGranularity conflicts.
10076  // If conflict exists, we must mark more allocations lost or fail.
10077  if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
10078  {
10079  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
10080  ++nextSuballocItem;
10081  while(nextSuballocItem != m_Suballocations.cend())
10082  {
10083  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
10084  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10085  {
10086  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10087  {
10088  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
10089  if(nextSuballoc.hAllocation->CanBecomeLost() &&
10090  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10091  {
10092  ++*itemsToMakeLostCount;
10093  }
10094  else
10095  {
10096  return false;
10097  }
10098  }
10099  }
10100  else
10101  {
10102  // Already on next page.
10103  break;
10104  }
10105  ++nextSuballocItem;
10106  }
10107  }
10108  }
10109  else
10110  {
10111  const VmaSuballocation& suballoc = *suballocItem;
10112  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10113 
10114  *pSumFreeSize = suballoc.size;
10115 
10116  // Size of this suballocation is too small for this request: Early return.
10117  if(suballoc.size < allocSize)
10118  {
10119  return false;
10120  }
10121 
10122  // Start from offset equal to beginning of this suballocation.
10123  *pOffset = suballoc.offset;
10124 
10125  // Apply VMA_DEBUG_MARGIN at the beginning.
10126  if(VMA_DEBUG_MARGIN > 0)
10127  {
10128  *pOffset += VMA_DEBUG_MARGIN;
10129  }
10130 
10131  // Apply alignment.
10132  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
10133 
10134  // Check previous suballocations for BufferImageGranularity conflicts.
10135  // Make bigger alignment if necessary.
10136  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
10137  {
10138  bool bufferImageGranularityConflict = false;
10139  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
10140  while(prevSuballocItem != m_Suballocations.cbegin())
10141  {
10142  --prevSuballocItem;
10143  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
10144  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
10145  {
10146  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10147  {
10148  bufferImageGranularityConflict = true;
10149  break;
10150  }
10151  }
10152  else
10153  // Already on previous page.
10154  break;
10155  }
10156  if(bufferImageGranularityConflict)
10157  {
10158  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
10159  }
10160  }
10161 
10162  // Calculate padding at the beginning based on current offset.
10163  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
10164 
10165  // Calculate required margin at the end.
10166  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
10167 
10168  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
10169  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
10170  {
10171  return false;
10172  }
10173 
10174  // Check next suballocations for BufferImageGranularity conflicts.
10175  // If conflict exists, allocation cannot be made here.
10176  if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
10177  {
10178  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
10179  ++nextSuballocItem;
10180  while(nextSuballocItem != m_Suballocations.cend())
10181  {
10182  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
10183  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10184  {
10185  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10186  {
10187  return false;
10188  }
10189  }
10190  else
10191  {
10192  // Already on next page.
10193  break;
10194  }
10195  ++nextSuballocItem;
10196  }
10197  }
10198  }
10199 
10200  // All tests passed: Success. pOffset is already filled.
10201  return true;
10202 }
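// Worked example of the granularity logic above (hypothetical numbers, with
// bufferImageGranularity = 4096): an existing IMAGE_OPTIMAL suballocation at
// [0, 5000) ends on page 1 (4999 / 4096 == 1). A candidate BUFFER at offset 6000
// also starts on page 1, so VmaBlocksOnSamePage() reports a shared page, the
// linear/optimal pair conflicts, and *pOffset is aligned up to 8192, the next
// page boundary, before the size checks are redone.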
10203 
10204 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
10205 {
10206  VMA_ASSERT(item != m_Suballocations.end());
10207  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10208 
10209  VmaSuballocationList::iterator nextItem = item;
10210  ++nextItem;
10211  VMA_ASSERT(nextItem != m_Suballocations.end());
10212  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
10213 
10214  item->size += nextItem->size;
10215  --m_FreeCount;
10216  m_Suballocations.erase(nextItem);
10217 }
10218 
10219 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
10220 {
10221  // Change this suballocation to be marked as free.
10222  VmaSuballocation& suballoc = *suballocItem;
10223  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10224  suballoc.hAllocation = VK_NULL_HANDLE;
10225 
10226  // Update totals.
10227  ++m_FreeCount;
10228  m_SumFreeSize += suballoc.size;
10229 
10230  // Merge with previous and/or next suballocation if it's also free.
10231  bool mergeWithNext = false;
10232  bool mergeWithPrev = false;
10233 
10234  VmaSuballocationList::iterator nextItem = suballocItem;
10235  ++nextItem;
10236  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
10237  {
10238  mergeWithNext = true;
10239  }
10240 
10241  VmaSuballocationList::iterator prevItem = suballocItem;
10242  if(suballocItem != m_Suballocations.begin())
10243  {
10244  --prevItem;
10245  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
10246  {
10247  mergeWithPrev = true;
10248  }
10249  }
10250 
10251  if(mergeWithNext)
10252  {
10253  UnregisterFreeSuballocation(nextItem);
10254  MergeFreeWithNext(suballocItem);
10255  }
10256 
10257  if(mergeWithPrev)
10258  {
10259  UnregisterFreeSuballocation(prevItem);
10260  MergeFreeWithNext(prevItem);
10261  RegisterFreeSuballocation(prevItem);
10262  return prevItem;
10263  }
10264  else
10265  {
10266  RegisterFreeSuballocation(suballocItem);
10267  return suballocItem;
10268  }
10269 }
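// Free-range coalescing performed above, pictured (F = free, U = used):
//
//   on entry:    [F_prev][U_item][F_next]
//   mark free:   [F_prev][F_item][F_next]
//   merge next:  [F_prev][F_item + F_next]
//   merge prev:  [F_prev + F_item + F_next]   -> prevItem is returned
//
// Each absorbed neighbor is first unregistered from m_FreeSuballocationsBySize and
// the surviving, larger range is re-registered, keeping that vector sorted by size.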
10270 
10271 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
10272 {
10273  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10274  VMA_ASSERT(item->size > 0);
10275 
10276  // You may want to enable this validation at the beginning or at the end of
10277  // this function, depending on what you want to check.
10278  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10279 
10280  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
10281  {
10282  if(m_FreeSuballocationsBySize.empty())
10283  {
10284  m_FreeSuballocationsBySize.push_back(item);
10285  }
10286  else
10287  {
10288  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
10289  }
10290  }
10291 
10292  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10293 }
10294 
10295 
10296 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
10297 {
10298  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10299  VMA_ASSERT(item->size > 0);
10300 
10301  // You may want to enable this validation at the beginning or at the end of
10302  // this function, depending on what you want to check.
10303  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10304 
10305  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
10306  {
10307  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
10308  m_FreeSuballocationsBySize.data(),
10309  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
10310  item,
10311  VmaSuballocationItemSizeLess());
10312  for(size_t index = it - m_FreeSuballocationsBySize.data();
10313  index < m_FreeSuballocationsBySize.size();
10314  ++index)
10315  {
10316  if(m_FreeSuballocationsBySize[index] == item)
10317  {
10318  VmaVectorRemove(m_FreeSuballocationsBySize, index);
10319  return;
10320  }
10321  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
10322  }
10323  VMA_ASSERT(0 && "Not found.");
10324  }
10325 
10326  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10327 }
10328 
10329 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
10330  VkDeviceSize bufferImageGranularity,
10331  VmaSuballocationType& inOutPrevSuballocType) const
10332 {
10333  if(bufferImageGranularity == 1 || IsEmpty())
10334  {
10335  return false;
10336  }
10337 
10338  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
10339  bool typeConflictFound = false;
10340  for(const auto& suballoc : m_Suballocations)
10341  {
10342  const VmaSuballocationType suballocType = suballoc.type;
10343  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
10344  {
10345  minAlignment = VMA_MIN(minAlignment, suballoc.hAllocation->GetAlignment());
10346  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
10347  {
10348  typeConflictFound = true;
10349  }
10350  inOutPrevSuballocType = suballocType;
10351  }
10352  }
10353 
10354  return typeConflictFound || minAlignment >= bufferImageGranularity;
10355 }
10356 
10357 ////////////////////////////////////////////////////////////////////////////////
10358 // class VmaBlockMetadata_Linear
10359 
10360 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
10361  VmaBlockMetadata(hAllocator),
10362  m_SumFreeSize(0),
10363  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10364  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10365  m_1stVectorIndex(0),
10366  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
10367  m_1stNullItemsBeginCount(0),
10368  m_1stNullItemsMiddleCount(0),
10369  m_2ndNullItemsCount(0)
10370 {
10371 }
10372 
10373 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
10374 {
10375 }
10376 
10377 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
10378 {
10379  VmaBlockMetadata::Init(size);
10380  m_SumFreeSize = size;
10381 }
10382 
10383 bool VmaBlockMetadata_Linear::Validate() const
10384 {
10385  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10386  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10387 
10388  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
10389  VMA_VALIDATE(!suballocations1st.empty() ||
10390  suballocations2nd.empty() ||
10391  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
10392 
10393  if(!suballocations1st.empty())
10394  {
10395  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
10396  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
10397  // Null item at the end should be just pop_back().
10398  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
10399  }
10400  if(!suballocations2nd.empty())
10401  {
10402  // Null item at the end should be just pop_back().
10403  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
10404  }
10405 
10406  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
10407  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
10408 
10409  VkDeviceSize sumUsedSize = 0;
10410  const size_t suballoc1stCount = suballocations1st.size();
10411  VkDeviceSize offset = VMA_DEBUG_MARGIN;
10412 
10413  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10414  {
10415  const size_t suballoc2ndCount = suballocations2nd.size();
10416  size_t nullItem2ndCount = 0;
10417  for(size_t i = 0; i < suballoc2ndCount; ++i)
10418  {
10419  const VmaSuballocation& suballoc = suballocations2nd[i];
10420  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10421 
10422  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10423  VMA_VALIDATE(suballoc.offset >= offset);
10424 
10425  if(!currFree)
10426  {
10427  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10428  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10429  sumUsedSize += suballoc.size;
10430  }
10431  else
10432  {
10433  ++nullItem2ndCount;
10434  }
10435 
10436  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10437  }
10438 
10439  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10440  }
10441 
10442  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
10443  {
10444  const VmaSuballocation& suballoc = suballocations1st[i];
10445  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
10446  suballoc.hAllocation == VK_NULL_HANDLE);
10447  }
10448 
10449  size_t nullItem1stCount = m_1stNullItemsBeginCount;
10450 
10451  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
10452  {
10453  const VmaSuballocation& suballoc = suballocations1st[i];
10454  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10455 
10456  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10457  VMA_VALIDATE(suballoc.offset >= offset);
10458  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
10459 
10460  if(!currFree)
10461  {
10462  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10463  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10464  sumUsedSize += suballoc.size;
10465  }
10466  else
10467  {
10468  ++nullItem1stCount;
10469  }
10470 
10471  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10472  }
10473  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
10474 
10475  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10476  {
10477  const size_t suballoc2ndCount = suballocations2nd.size();
10478  size_t nullItem2ndCount = 0;
10479  for(size_t i = suballoc2ndCount; i--; )
10480  {
10481  const VmaSuballocation& suballoc = suballocations2nd[i];
10482  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10483 
10484  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10485  VMA_VALIDATE(suballoc.offset >= offset);
10486 
10487  if(!currFree)
10488  {
10489  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10490  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10491  sumUsedSize += suballoc.size;
10492  }
10493  else
10494  {
10495  ++nullItem2ndCount;
10496  }
10497 
10498  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10499  }
10500 
10501  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10502  }
10503 
10504  VMA_VALIDATE(offset <= GetSize());
10505  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
10506 
10507  return true;
10508 }
10509 
10510 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
10511 {
10512  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
10513  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
10514 }
10515 
10516 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
10517 {
10518  const VkDeviceSize size = GetSize();
10519 
10520  /*
10521  We don't consider gaps left inside the allocation vectors by freed allocations
10522  because they are not suitable for reuse by the linear allocator. Only space that
10523  is available for new allocations is considered.
10524  */
10525  if(IsEmpty())
10526  {
10527  return size;
10528  }
10529 
10530  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10531 
10532  switch(m_2ndVectorMode)
10533  {
10534  case SECOND_VECTOR_EMPTY:
10535  /*
10536  Available space is after end of 1st, as well as before beginning of 1st (which
10537  would make it a ring buffer).
10538  */
10539  {
10540  const size_t suballocations1stCount = suballocations1st.size();
10541  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
10542  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10543  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
10544  return VMA_MAX(
10545  firstSuballoc.offset,
10546  size - (lastSuballoc.offset + lastSuballoc.size));
10547  }
10548  break;
10549 
10550  case SECOND_VECTOR_RING_BUFFER:
10551  /*
10552  Available space is only between end of 2nd and beginning of 1st.
10553  */
10554  {
10555  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10556  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
10557  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
10558  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
10559  }
10560  break;
10561 
10562  case SECOND_VECTOR_DOUBLE_STACK:
10563  /*
10564  Available space is only between end of 1st and top of 2nd.
10565  */
10566  {
10567  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10568  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
10569  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
10570  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
10571  }
10572  break;
10573 
10574  default:
10575  VMA_ASSERT(0);
10576  return 0;
10577  }
10578 }
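// Illustrative summary of the three layouts handled above (offsets grow to the right,
// '1' = suballocations of the 1st vector, '2' = suballocations of the 2nd vector):
//
//   SECOND_VECTOR_EMPTY:        |     111111         |  free space before and after 1st
//   SECOND_VECTOR_RING_BUFFER:  | 2222     111111    |  free space between end of 2nd and start of 1st
//   SECOND_VECTOR_DOUBLE_STACK: | 111111      222222 |  free space between end of 1st and top of 2nd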
10579 
10580 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10581 {
10582  const VkDeviceSize size = GetSize();
10583  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10584  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10585  const size_t suballoc1stCount = suballocations1st.size();
10586  const size_t suballoc2ndCount = suballocations2nd.size();
10587 
10588  outInfo.blockCount = 1;
10589  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10590  outInfo.unusedRangeCount = 0;
10591  outInfo.usedBytes = 0;
10592  outInfo.allocationSizeMin = UINT64_MAX;
10593  outInfo.allocationSizeMax = 0;
10594  outInfo.unusedRangeSizeMin = UINT64_MAX;
10595  outInfo.unusedRangeSizeMax = 0;
10596 
10597  VkDeviceSize lastOffset = 0;
10598 
10599  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10600  {
10601  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10602  size_t nextAlloc2ndIndex = 0;
10603  while(lastOffset < freeSpace2ndTo1stEnd)
10604  {
10605  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10606  while(nextAlloc2ndIndex < suballoc2ndCount &&
10607  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10608  {
10609  ++nextAlloc2ndIndex;
10610  }
10611 
10612  // Found non-null allocation.
10613  if(nextAlloc2ndIndex < suballoc2ndCount)
10614  {
10615  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10616 
10617  // 1. Process free space before this allocation.
10618  if(lastOffset < suballoc.offset)
10619  {
10620  // There is free space from lastOffset to suballoc.offset.
10621  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10622  ++outInfo.unusedRangeCount;
10623  outInfo.unusedBytes += unusedRangeSize;
10624  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10625  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10626  }
10627 
10628  // 2. Process this allocation.
10629  // There is allocation with suballoc.offset, suballoc.size.
10630  outInfo.usedBytes += suballoc.size;
10631  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10632  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10633 
10634  // 3. Prepare for next iteration.
10635  lastOffset = suballoc.offset + suballoc.size;
10636  ++nextAlloc2ndIndex;
10637  }
10638  // We are at the end.
10639  else
10640  {
10641  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10642  if(lastOffset < freeSpace2ndTo1stEnd)
10643  {
10644  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10645  ++outInfo.unusedRangeCount;
10646  outInfo.unusedBytes += unusedRangeSize;
10647  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10648  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10649  }
10650 
10651  // End of loop.
10652  lastOffset = freeSpace2ndTo1stEnd;
10653  }
10654  }
10655  }
10656 
10657  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10658  const VkDeviceSize freeSpace1stTo2ndEnd =
10659  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10660  while(lastOffset < freeSpace1stTo2ndEnd)
10661  {
10662  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10663  while(nextAlloc1stIndex < suballoc1stCount &&
10664  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10665  {
10666  ++nextAlloc1stIndex;
10667  }
10668 
10669  // Found non-null allocation.
10670  if(nextAlloc1stIndex < suballoc1stCount)
10671  {
10672  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10673 
10674  // 1. Process free space before this allocation.
10675  if(lastOffset < suballoc.offset)
10676  {
10677  // There is free space from lastOffset to suballoc.offset.
10678  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10679  ++outInfo.unusedRangeCount;
10680  outInfo.unusedBytes += unusedRangeSize;
10681  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10682  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10683  }
10684 
10685  // 2. Process this allocation.
10686  // There is allocation with suballoc.offset, suballoc.size.
10687  outInfo.usedBytes += suballoc.size;
10688  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10689  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10690 
10691  // 3. Prepare for next iteration.
10692  lastOffset = suballoc.offset + suballoc.size;
10693  ++nextAlloc1stIndex;
10694  }
10695  // We are at the end.
10696  else
10697  {
10698  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10699  if(lastOffset < freeSpace1stTo2ndEnd)
10700  {
10701  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10702  ++outInfo.unusedRangeCount;
10703  outInfo.unusedBytes += unusedRangeSize;
10704  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10705  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10706  }
10707 
10708  // End of loop.
10709  lastOffset = freeSpace1stTo2ndEnd;
10710  }
10711  }
10712 
10713  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10714  {
10715  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10716  while(lastOffset < size)
10717  {
10718  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10719  while(nextAlloc2ndIndex != SIZE_MAX &&
10720  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10721  {
10722  --nextAlloc2ndIndex;
10723  }
10724 
10725  // Found non-null allocation.
10726  if(nextAlloc2ndIndex != SIZE_MAX)
10727  {
10728  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10729 
10730  // 1. Process free space before this allocation.
10731  if(lastOffset < suballoc.offset)
10732  {
10733  // There is free space from lastOffset to suballoc.offset.
10734  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10735  ++outInfo.unusedRangeCount;
10736  outInfo.unusedBytes += unusedRangeSize;
10737  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10738  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10739  }
10740 
10741  // 2. Process this allocation.
10742  // There is allocation with suballoc.offset, suballoc.size.
10743  outInfo.usedBytes += suballoc.size;
10744  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10745  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10746 
10747  // 3. Prepare for next iteration.
10748  lastOffset = suballoc.offset + suballoc.size;
10749  --nextAlloc2ndIndex;
10750  }
10751  // We are at the end.
10752  else
10753  {
10754  // There is free space from lastOffset to size.
10755  if(lastOffset < size)
10756  {
10757  const VkDeviceSize unusedRangeSize = size - lastOffset;
10758  ++outInfo.unusedRangeCount;
10759  outInfo.unusedBytes += unusedRangeSize;
10760  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10761  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10762  }
10763 
10764  // End of loop.
10765  lastOffset = size;
10766  }
10767  }
10768  }
10769 
10770  outInfo.unusedBytes = size - outInfo.usedBytes;
10771 }
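// For reference, a minimal sketch of how these per-block statistics reach the user
// through the public API; `allocator` is assumed to be a valid VmaAllocator:
//
//   VmaStats stats;
//   vmaCalculateStats(allocator, &stats);
//   printf("Used %llu B in %u allocation(s), unused %llu B in %u range(s).\n",
//       (unsigned long long)stats.total.usedBytes, stats.total.allocationCount,
//       (unsigned long long)stats.total.unusedBytes, stats.total.unusedRangeCount);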
10772 
10773 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10774 {
10775  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10776  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10777  const VkDeviceSize size = GetSize();
10778  const size_t suballoc1stCount = suballocations1st.size();
10779  const size_t suballoc2ndCount = suballocations2nd.size();
10780 
10781  inoutStats.size += size;
10782 
10783  VkDeviceSize lastOffset = 0;
10784 
10785  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10786  {
10787  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10788  size_t nextAlloc2ndIndex = 0; // 2nd vector is indexed from 0, independently of m_1stNullItemsBeginCount.
10789  while(lastOffset < freeSpace2ndTo1stEnd)
10790  {
10791  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10792  while(nextAlloc2ndIndex < suballoc2ndCount &&
10793  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10794  {
10795  ++nextAlloc2ndIndex;
10796  }
10797 
10798  // Found non-null allocation.
10799  if(nextAlloc2ndIndex < suballoc2ndCount)
10800  {
10801  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10802 
10803  // 1. Process free space before this allocation.
10804  if(lastOffset < suballoc.offset)
10805  {
10806  // There is free space from lastOffset to suballoc.offset.
10807  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10808  inoutStats.unusedSize += unusedRangeSize;
10809  ++inoutStats.unusedRangeCount;
10810  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10811  }
10812 
10813  // 2. Process this allocation.
10814  // There is allocation with suballoc.offset, suballoc.size.
10815  ++inoutStats.allocationCount;
10816 
10817  // 3. Prepare for next iteration.
10818  lastOffset = suballoc.offset + suballoc.size;
10819  ++nextAlloc2ndIndex;
10820  }
10821  // We are at the end.
10822  else
10823  {
10824  if(lastOffset < freeSpace2ndTo1stEnd)
10825  {
10826  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10827  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10828  inoutStats.unusedSize += unusedRangeSize;
10829  ++inoutStats.unusedRangeCount;
10830  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10831  }
10832 
10833  // End of loop.
10834  lastOffset = freeSpace2ndTo1stEnd;
10835  }
10836  }
10837  }
10838 
10839  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10840  const VkDeviceSize freeSpace1stTo2ndEnd =
10841  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10842  while(lastOffset < freeSpace1stTo2ndEnd)
10843  {
10844  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10845  while(nextAlloc1stIndex < suballoc1stCount &&
10846  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10847  {
10848  ++nextAlloc1stIndex;
10849  }
10850 
10851  // Found non-null allocation.
10852  if(nextAlloc1stIndex < suballoc1stCount)
10853  {
10854  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10855 
10856  // 1. Process free space before this allocation.
10857  if(lastOffset < suballoc.offset)
10858  {
10859  // There is free space from lastOffset to suballoc.offset.
10860  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10861  inoutStats.unusedSize += unusedRangeSize;
10862  ++inoutStats.unusedRangeCount;
10863  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10864  }
10865 
10866  // 2. Process this allocation.
10867  // There is allocation with suballoc.offset, suballoc.size.
10868  ++inoutStats.allocationCount;
10869 
10870  // 3. Prepare for next iteration.
10871  lastOffset = suballoc.offset + suballoc.size;
10872  ++nextAlloc1stIndex;
10873  }
10874  // We are at the end.
10875  else
10876  {
10877  if(lastOffset < freeSpace1stTo2ndEnd)
10878  {
10879  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10880  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10881  inoutStats.unusedSize += unusedRangeSize;
10882  ++inoutStats.unusedRangeCount;
10883  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10884  }
10885 
10886  // End of loop.
10887  lastOffset = freeSpace1stTo2ndEnd;
10888  }
10889  }
10890 
10891  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10892  {
10893  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10894  while(lastOffset < size)
10895  {
10896  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10897  while(nextAlloc2ndIndex != SIZE_MAX &&
10898  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10899  {
10900  --nextAlloc2ndIndex;
10901  }
10902 
10903  // Found non-null allocation.
10904  if(nextAlloc2ndIndex != SIZE_MAX)
10905  {
10906  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10907 
10908  // 1. Process free space before this allocation.
10909  if(lastOffset < suballoc.offset)
10910  {
10911  // There is free space from lastOffset to suballoc.offset.
10912  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10913  inoutStats.unusedSize += unusedRangeSize;
10914  ++inoutStats.unusedRangeCount;
10915  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10916  }
10917 
10918  // 2. Process this allocation.
10919  // There is allocation with suballoc.offset, suballoc.size.
10920  ++inoutStats.allocationCount;
10921 
10922  // 3. Prepare for next iteration.
10923  lastOffset = suballoc.offset + suballoc.size;
10924  --nextAlloc2ndIndex;
10925  }
10926  // We are at the end.
10927  else
10928  {
10929  if(lastOffset < size)
10930  {
10931  // There is free space from lastOffset to size.
10932  const VkDeviceSize unusedRangeSize = size - lastOffset;
10933  inoutStats.unusedSize += unusedRangeSize;
10934  ++inoutStats.unusedRangeCount;
10935  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10936  }
10937 
10938  // End of loop.
10939  lastOffset = size;
10940  }
10941  }
10942  }
10943 }
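// A minimal usage sketch for the pool-level path above, assuming a valid `allocator`
// and a custom `pool`:
//
//   VmaPoolStats poolStats = {};
//   vmaGetPoolStats(allocator, pool, &poolStats);
//   // poolStats.unusedRangeSizeMax is the size of the largest contiguous free range,
//   // i.e. an upper bound on what can still be allocated without growing the pool.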
10944 
10945 #if VMA_STATS_STRING_ENABLED
10946 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10947 {
10948  const VkDeviceSize size = GetSize();
10949  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10950  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10951  const size_t suballoc1stCount = suballocations1st.size();
10952  const size_t suballoc2ndCount = suballocations2nd.size();
10953 
10954  // FIRST PASS
10955 
10956  size_t unusedRangeCount = 0;
10957  VkDeviceSize usedBytes = 0;
10958 
10959  VkDeviceSize lastOffset = 0;
10960 
10961  size_t alloc2ndCount = 0;
10962  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10963  {
10964  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10965  size_t nextAlloc2ndIndex = 0;
10966  while(lastOffset < freeSpace2ndTo1stEnd)
10967  {
10968  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10969  while(nextAlloc2ndIndex < suballoc2ndCount &&
10970  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10971  {
10972  ++nextAlloc2ndIndex;
10973  }
10974 
10975  // Found non-null allocation.
10976  if(nextAlloc2ndIndex < suballoc2ndCount)
10977  {
10978  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10979 
10980  // 1. Process free space before this allocation.
10981  if(lastOffset < suballoc.offset)
10982  {
10983  // There is free space from lastOffset to suballoc.offset.
10984  ++unusedRangeCount;
10985  }
10986 
10987  // 2. Process this allocation.
10988  // There is allocation with suballoc.offset, suballoc.size.
10989  ++alloc2ndCount;
10990  usedBytes += suballoc.size;
10991 
10992  // 3. Prepare for next iteration.
10993  lastOffset = suballoc.offset + suballoc.size;
10994  ++nextAlloc2ndIndex;
10995  }
10996  // We are at the end.
10997  else
10998  {
10999  if(lastOffset < freeSpace2ndTo1stEnd)
11000  {
11001  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
11002  ++unusedRangeCount;
11003  }
11004 
11005  // End of loop.
11006  lastOffset = freeSpace2ndTo1stEnd;
11007  }
11008  }
11009  }
11010 
11011  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
11012  size_t alloc1stCount = 0;
11013  const VkDeviceSize freeSpace1stTo2ndEnd =
11014  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
11015  while(lastOffset < freeSpace1stTo2ndEnd)
11016  {
11017  // Find next non-null allocation or move nextAlloc1stIndex to the end.
11018  while(nextAlloc1stIndex < suballoc1stCount &&
11019  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
11020  {
11021  ++nextAlloc1stIndex;
11022  }
11023 
11024  // Found non-null allocation.
11025  if(nextAlloc1stIndex < suballoc1stCount)
11026  {
11027  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
11028 
11029  // 1. Process free space before this allocation.
11030  if(lastOffset < suballoc.offset)
11031  {
11032  // There is free space from lastOffset to suballoc.offset.
11033  ++unusedRangeCount;
11034  }
11035 
11036  // 2. Process this allocation.
11037  // There is allocation with suballoc.offset, suballoc.size.
11038  ++alloc1stCount;
11039  usedBytes += suballoc.size;
11040 
11041  // 3. Prepare for next iteration.
11042  lastOffset = suballoc.offset + suballoc.size;
11043  ++nextAlloc1stIndex;
11044  }
11045  // We are at the end.
11046  else
11047  {
11048  if(lastOffset < freeSpace1stTo2ndEnd)
11049  {
11050  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
11051  ++unusedRangeCount;
11052  }
11053 
11054  // End of loop.
11055  lastOffset = freeSpace1stTo2ndEnd;
11056  }
11057  }
11058 
11059  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11060  {
11061  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
11062  while(lastOffset < size)
11063  {
11064  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11065  while(nextAlloc2ndIndex != SIZE_MAX &&
11066  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11067  {
11068  --nextAlloc2ndIndex;
11069  }
11070 
11071  // Found non-null allocation.
11072  if(nextAlloc2ndIndex != SIZE_MAX)
11073  {
11074  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11075 
11076  // 1. Process free space before this allocation.
11077  if(lastOffset < suballoc.offset)
11078  {
11079  // There is free space from lastOffset to suballoc.offset.
11080  ++unusedRangeCount;
11081  }
11082 
11083  // 2. Process this allocation.
11084  // There is allocation with suballoc.offset, suballoc.size.
11085  ++alloc2ndCount;
11086  usedBytes += suballoc.size;
11087 
11088  // 3. Prepare for next iteration.
11089  lastOffset = suballoc.offset + suballoc.size;
11090  --nextAlloc2ndIndex;
11091  }
11092  // We are at the end.
11093  else
11094  {
11095  if(lastOffset < size)
11096  {
11097  // There is free space from lastOffset to size.
11098  ++unusedRangeCount;
11099  }
11100 
11101  // End of loop.
11102  lastOffset = size;
11103  }
11104  }
11105  }
11106 
11107  const VkDeviceSize unusedBytes = size - usedBytes;
11108  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
11109 
11110  // SECOND PASS
11111  lastOffset = 0;
11112 
11113  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11114  {
11115  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
11116  size_t nextAlloc2ndIndex = 0;
11117  while(lastOffset < freeSpace2ndTo1stEnd)
11118  {
11119  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11120  while(nextAlloc2ndIndex < suballoc2ndCount &&
11121  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11122  {
11123  ++nextAlloc2ndIndex;
11124  }
11125 
11126  // Found non-null allocation.
11127  if(nextAlloc2ndIndex < suballoc2ndCount)
11128  {
11129  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11130 
11131  // 1. Process free space before this allocation.
11132  if(lastOffset < suballoc.offset)
11133  {
11134  // There is free space from lastOffset to suballoc.offset.
11135  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11136  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11137  }
11138 
11139  // 2. Process this allocation.
11140  // There is allocation with suballoc.offset, suballoc.size.
11141  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11142 
11143  // 3. Prepare for next iteration.
11144  lastOffset = suballoc.offset + suballoc.size;
11145  ++nextAlloc2ndIndex;
11146  }
11147  // We are at the end.
11148  else
11149  {
11150  if(lastOffset < freeSpace2ndTo1stEnd)
11151  {
11152  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
11153  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
11154  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11155  }
11156 
11157  // End of loop.
11158  lastOffset = freeSpace2ndTo1stEnd;
11159  }
11160  }
11161  }
11162 
11163  nextAlloc1stIndex = m_1stNullItemsBeginCount;
11164  while(lastOffset < freeSpace1stTo2ndEnd)
11165  {
11166  // Find next non-null allocation or move nextAlloc1stIndex to the end.
11167  while(nextAlloc1stIndex < suballoc1stCount &&
11168  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
11169  {
11170  ++nextAlloc1stIndex;
11171  }
11172 
11173  // Found non-null allocation.
11174  if(nextAlloc1stIndex < suballoc1stCount)
11175  {
11176  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
11177 
11178  // 1. Process free space before this allocation.
11179  if(lastOffset < suballoc.offset)
11180  {
11181  // There is free space from lastOffset to suballoc.offset.
11182  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11183  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11184  }
11185 
11186  // 2. Process this allocation.
11187  // There is allocation with suballoc.offset, suballoc.size.
11188  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11189 
11190  // 3. Prepare for next iteration.
11191  lastOffset = suballoc.offset + suballoc.size;
11192  ++nextAlloc1stIndex;
11193  }
11194  // We are at the end.
11195  else
11196  {
11197  if(lastOffset < freeSpace1stTo2ndEnd)
11198  {
11199  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
11200  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
11201  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11202  }
11203 
11204  // End of loop.
11205  lastOffset = freeSpace1stTo2ndEnd;
11206  }
11207  }
11208 
11209  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11210  {
11211  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
11212  while(lastOffset < size)
11213  {
11214  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11215  while(nextAlloc2ndIndex != SIZE_MAX &&
11216  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11217  {
11218  --nextAlloc2ndIndex;
11219  }
11220 
11221  // Found non-null allocation.
11222  if(nextAlloc2ndIndex != SIZE_MAX)
11223  {
11224  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11225 
11226  // 1. Process free space before this allocation.
11227  if(lastOffset < suballoc.offset)
11228  {
11229  // There is free space from lastOffset to suballoc.offset.
11230  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11231  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11232  }
11233 
11234  // 2. Process this allocation.
11235  // There is allocation with suballoc.offset, suballoc.size.
11236  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11237 
11238  // 3. Prepare for next iteration.
11239  lastOffset = suballoc.offset + suballoc.size;
11240  --nextAlloc2ndIndex;
11241  }
11242  // We are at the end.
11243  else
11244  {
11245  if(lastOffset < size)
11246  {
11247  // There is free space from lastOffset to size.
11248  const VkDeviceSize unusedRangeSize = size - lastOffset;
11249  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11250  }
11251 
11252  // End of loop.
11253  lastOffset = size;
11254  }
11255  }
11256  }
11257 
11258  PrintDetailedMap_End(json);
11259 }
11260 #endif // #if VMA_STATS_STRING_ENABLED
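// Usage sketch: the JSON emitted by PrintDetailedMap() is exposed through
// vmaBuildStatsString() when VMA_STATS_STRING_ENABLED is nonzero (assumes a valid
// `allocator`):
//
//   char* statsString = VMA_NULL;
//   vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
//   // ... dump statsString to a file or log ...
//   vmaFreeStatsString(allocator, statsString);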
11261 
11262 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
11263  uint32_t currentFrameIndex,
11264  uint32_t frameInUseCount,
11265  VkDeviceSize bufferImageGranularity,
11266  VkDeviceSize allocSize,
11267  VkDeviceSize allocAlignment,
11268  bool upperAddress,
11269  VmaSuballocationType allocType,
11270  bool canMakeOtherLost,
11271  uint32_t strategy,
11272  VmaAllocationRequest* pAllocationRequest)
11273 {
11274  VMA_ASSERT(allocSize > 0);
11275  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
11276  VMA_ASSERT(pAllocationRequest != VMA_NULL);
11277  VMA_HEAVY_ASSERT(Validate());
11278  return upperAddress ?
11279  CreateAllocationRequest_UpperAddress(
11280  currentFrameIndex, frameInUseCount, bufferImageGranularity,
11281  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
11282  CreateAllocationRequest_LowerAddress(
11283  currentFrameIndex, frameInUseCount, bufferImageGranularity,
11284  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
11285 }
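// For context, a hedged sketch of how a caller opts into this linear algorithm via
// the public API (field values below are illustrative assumptions):
//
//   VmaPoolCreateInfo poolCreateInfo = {};
//   poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex().
//   poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//   poolCreateInfo.blockSize = 64ull * 1024 * 1024;
//   poolCreateInfo.maxBlockCount = 1; // Linear algorithm supports only a single block.
//   VmaPool pool;
//   VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);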
11286 
11287 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
11288  uint32_t currentFrameIndex,
11289  uint32_t frameInUseCount,
11290  VkDeviceSize bufferImageGranularity,
11291  VkDeviceSize allocSize,
11292  VkDeviceSize allocAlignment,
11293  VmaSuballocationType allocType,
11294  bool canMakeOtherLost,
11295  uint32_t strategy,
11296  VmaAllocationRequest* pAllocationRequest)
11297 {
11298  const VkDeviceSize size = GetSize();
11299  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11300  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11301 
11302  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11303  {
11304  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
11305  return false;
11306  }
11307 
11308  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
11309  if(allocSize > size)
11310  {
11311  return false;
11312  }
11313  VkDeviceSize resultBaseOffset = size - allocSize;
11314  if(!suballocations2nd.empty())
11315  {
11316  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11317  resultBaseOffset = lastSuballoc.offset - allocSize;
11318  if(allocSize > lastSuballoc.offset)
11319  {
11320  return false;
11321  }
11322  }
11323 
11324  // Start from offset equal to end of free space.
11325  VkDeviceSize resultOffset = resultBaseOffset;
11326 
11327  // Apply VMA_DEBUG_MARGIN at the end.
11328  if(VMA_DEBUG_MARGIN > 0)
11329  {
11330  if(resultOffset < VMA_DEBUG_MARGIN)
11331  {
11332  return false;
11333  }
11334  resultOffset -= VMA_DEBUG_MARGIN;
11335  }
11336 
11337  // Apply alignment.
11338  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
11339 
11340  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
11341  // Make bigger alignment if necessary.
11342  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
11343  {
11344  bool bufferImageGranularityConflict = false;
11345  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11346  {
11347  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11348  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11349  {
11350  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
11351  {
11352  bufferImageGranularityConflict = true;
11353  break;
11354  }
11355  }
11356  else
11357  // Already on previous page.
11358  break;
11359  }
11360  if(bufferImageGranularityConflict)
11361  {
11362  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
11363  }
11364  }
11365 
11366  // There is enough free space.
11367  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
11368  suballocations1st.back().offset + suballocations1st.back().size :
11369  0;
11370  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
11371  {
11372  // Check previous suballocations for BufferImageGranularity conflicts.
11373  // If conflict exists, allocation cannot be made here.
11374  if(bufferImageGranularity > 1)
11375  {
11376  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11377  {
11378  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11379  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11380  {
11381  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
11382  {
11383  return false;
11384  }
11385  }
11386  else
11387  {
11388  // Already on next page.
11389  break;
11390  }
11391  }
11392  }
11393 
11394  // All tests passed: Success.
11395  pAllocationRequest->offset = resultOffset;
11396  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
11397  pAllocationRequest->sumItemSize = 0;
11398  // pAllocationRequest->item unused.
11399  pAllocationRequest->itemsToMakeLostCount = 0;
11400  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
11401  return true;
11402  }
11403 
11404  return false;
11405 }
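// A minimal sketch of requesting the upper-address path above through the public API;
// `pool` is assumed to be a pool created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT and
// `bufCreateInfo` a filled VkBufferCreateInfo:
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.pool = pool;
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // Allocate from the 2nd stack.
//   VkBuffer buf;
//   VmaAllocation alloc;
//   VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);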
11406 
11407 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
11408  uint32_t currentFrameIndex,
11409  uint32_t frameInUseCount,
11410  VkDeviceSize bufferImageGranularity,
11411  VkDeviceSize allocSize,
11412  VkDeviceSize allocAlignment,
11413  VmaSuballocationType allocType,
11414  bool canMakeOtherLost,
11415  uint32_t strategy,
11416  VmaAllocationRequest* pAllocationRequest)
11417 {
11418  const VkDeviceSize size = GetSize();
11419  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11420  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11421 
11422  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11423  {
11424  // Try to allocate at the end of 1st vector.
11425 
11426  VkDeviceSize resultBaseOffset = 0;
11427  if(!suballocations1st.empty())
11428  {
11429  const VmaSuballocation& lastSuballoc = suballocations1st.back();
11430  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11431  }
11432 
11433  // Start from offset equal to beginning of free space.
11434  VkDeviceSize resultOffset = resultBaseOffset;
11435 
11436  // Apply VMA_DEBUG_MARGIN at the beginning.
11437  if(VMA_DEBUG_MARGIN > 0)
11438  {
11439  resultOffset += VMA_DEBUG_MARGIN;
11440  }
11441 
11442  // Apply alignment.
11443  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11444 
11445  // Check previous suballocations for BufferImageGranularity conflicts.
11446  // Make bigger alignment if necessary.
11447  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
11448  {
11449  bool bufferImageGranularityConflict = false;
11450  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11451  {
11452  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11453  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11454  {
11455  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11456  {
11457  bufferImageGranularityConflict = true;
11458  break;
11459  }
11460  }
11461  else
11462  // Already on previous page.
11463  break;
11464  }
11465  if(bufferImageGranularityConflict)
11466  {
11467  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11468  }
11469  }
11470 
11471  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
11472  suballocations2nd.back().offset : size;
11473 
11474  // There is enough free space at the end after alignment.
11475  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
11476  {
11477  // Check next suballocations for BufferImageGranularity conflicts.
11478  // If conflict exists, allocation cannot be made here.
11479  if((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11480  {
11481  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11482  {
11483  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11484  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11485  {
11486  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11487  {
11488  return false;
11489  }
11490  }
11491  else
11492  {
11493  // Already on previous page.
11494  break;
11495  }
11496  }
11497  }
11498 
11499  // All tests passed: Success.
11500  pAllocationRequest->offset = resultOffset;
11501  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
11502  pAllocationRequest->sumItemSize = 0;
11503  // pAllocationRequest->item, customData unused.
11504  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
11505  pAllocationRequest->itemsToMakeLostCount = 0;
11506  return true;
11507  }
11508  }
11509 
11510  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
11511  // beginning of 1st vector as the end of free space.
11512  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11513  {
11514  VMA_ASSERT(!suballocations1st.empty());
11515 
11516  VkDeviceSize resultBaseOffset = 0;
11517  if(!suballocations2nd.empty())
11518  {
11519  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11520  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11521  }
11522 
11523  // Start from offset equal to beginning of free space.
11524  VkDeviceSize resultOffset = resultBaseOffset;
11525 
11526  // Apply VMA_DEBUG_MARGIN at the beginning.
11527  if(VMA_DEBUG_MARGIN > 0)
11528  {
11529  resultOffset += VMA_DEBUG_MARGIN;
11530  }
11531 
11532  // Apply alignment.
11533  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11534 
11535  // Check previous suballocations for BufferImageGranularity conflicts.
11536  // Make bigger alignment if necessary.
11537  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
11538  {
11539  bool bufferImageGranularityConflict = false;
11540  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
11541  {
11542  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
11543  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11544  {
11545  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11546  {
11547  bufferImageGranularityConflict = true;
11548  break;
11549  }
11550  }
11551  else
11552  // Already on previous page.
11553  break;
11554  }
11555  if(bufferImageGranularityConflict)
11556  {
11557  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11558  }
11559  }
11560 
11561  pAllocationRequest->itemsToMakeLostCount = 0;
11562  pAllocationRequest->sumItemSize = 0;
11563  size_t index1st = m_1stNullItemsBeginCount;
11564 
11565  if(canMakeOtherLost)
11566  {
11567  while(index1st < suballocations1st.size() &&
11568  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
11569  {
11570  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
11571  const VmaSuballocation& suballoc = suballocations1st[index1st];
11572  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11573  {
11574  // No problem.
11575  }
11576  else
11577  {
11578  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11579  if(suballoc.hAllocation->CanBecomeLost() &&
11580  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11581  {
11582  ++pAllocationRequest->itemsToMakeLostCount;
11583  pAllocationRequest->sumItemSize += suballoc.size;
11584  }
11585  else
11586  {
11587  return false;
11588  }
11589  }
11590  ++index1st;
11591  }
11592 
11593  // Check next suballocations for BufferImageGranularity conflicts.
11594  // If conflict exists, we must mark more allocations lost or fail.
11595  if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
11596  {
11597  while(index1st < suballocations1st.size())
11598  {
11599  const VmaSuballocation& suballoc = suballocations1st[index1st];
11600  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11601  {
11602  if(suballoc.hAllocation != VK_NULL_HANDLE)
11603  {
11604  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11605  if(suballoc.hAllocation->CanBecomeLost() &&
11606  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11607  {
11608  ++pAllocationRequest->itemsToMakeLostCount;
11609  pAllocationRequest->sumItemSize += suballoc.size;
11610  }
11611  else
11612  {
11613  return false;
11614  }
11615  }
11616  }
11617  else
11618  {
11619  // Already on next page.
11620  break;
11621  }
11622  ++index1st;
11623  }
11624  }
11625 
11626  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
11627  if(index1st == suballocations1st.size() &&
11628  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11629  {
11630  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
11631  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11632  }
11633  }
11634 
11635  // There is enough free space at the end after alignment.
11636  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11637  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11638  {
11639  // Check next suballocations for BufferImageGranularity conflicts.
11640  // If conflict exists, allocation cannot be made here.
11641  if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
11642  {
11643  for(size_t nextSuballocIndex = index1st;
11644  nextSuballocIndex < suballocations1st.size();
11645  nextSuballocIndex++)
11646  {
11647  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11648  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11649  {
11650  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11651  {
11652  return false;
11653  }
11654  }
11655  else
11656  {
11657  // Already on next page.
11658  break;
11659  }
11660  }
11661  }
11662 
11663  // All tests passed: Success.
11664  pAllocationRequest->offset = resultOffset;
11665  pAllocationRequest->sumFreeSize =
11666  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11667  - resultBaseOffset
11668  - pAllocationRequest->sumItemSize;
11669  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11670  // pAllocationRequest->item, customData unused.
11671  return true;
11672  }
11673  }
11674 
11675  return false;
11676 }
11677 
11678 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11679  uint32_t currentFrameIndex,
11680  uint32_t frameInUseCount,
11681  VmaAllocationRequest* pAllocationRequest)
11682 {
11683  if(pAllocationRequest->itemsToMakeLostCount == 0)
11684  {
11685  return true;
11686  }
11687 
11688  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11689 
11690  // We always start from 1st.
11691  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11692  size_t index = m_1stNullItemsBeginCount;
11693  size_t madeLostCount = 0;
11694  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11695  {
11696  if(index == suballocations->size())
11697  {
11698  index = 0;
11699  // At the end of 1st, wrap around to the beginning of 2nd (ring-buffer mode) or of 1st itself.
11700  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11701  {
11702  suballocations = &AccessSuballocations2nd();
11703  }
11704  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11705  // suballocations continues pointing at AccessSuballocations1st().
11706  VMA_ASSERT(!suballocations->empty());
11707  }
11708  VmaSuballocation& suballoc = (*suballocations)[index];
11709  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11710  {
11711  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11712  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11713  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11714  {
11715  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11716  suballoc.hAllocation = VK_NULL_HANDLE;
11717  m_SumFreeSize += suballoc.size;
11718  if(suballocations == &AccessSuballocations1st())
11719  {
11720  ++m_1stNullItemsMiddleCount;
11721  }
11722  else
11723  {
11724  ++m_2ndNullItemsCount;
11725  }
11726  ++madeLostCount;
11727  }
11728  else
11729  {
11730  return false;
11731  }
11732  }
11733  ++index;
11734  }
11735 
11736  CleanupAfterFree();
11737  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11738 
11739  return true;
11740 }
11741 
11742 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11743 {
11744  uint32_t lostAllocationCount = 0;
11745 
11746  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11747  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11748  {
11749  VmaSuballocation& suballoc = suballocations1st[i];
11750  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11751  suballoc.hAllocation->CanBecomeLost() &&
11752  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11753  {
11754  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11755  suballoc.hAllocation = VK_NULL_HANDLE;
11756  ++m_1stNullItemsMiddleCount;
11757  m_SumFreeSize += suballoc.size;
11758  ++lostAllocationCount;
11759  }
11760  }
11761 
11762  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11763  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11764  {
11765  VmaSuballocation& suballoc = suballocations2nd[i];
11766  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11767  suballoc.hAllocation->CanBecomeLost() &&
11768  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11769  {
11770  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11771  suballoc.hAllocation = VK_NULL_HANDLE;
11772  ++m_2ndNullItemsCount;
11773  m_SumFreeSize += suballoc.size;
11774  ++lostAllocationCount;
11775  }
11776  }
11777 
11778  if(lostAllocationCount)
11779  {
11780  CleanupAfterFree();
11781  }
11782 
11783  return lostAllocationCount;
11784 }
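// For context, a hedged sketch of the "lost allocations" flow from the user's side,
// assuming the allocation was created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT:
//
//   vmaSetCurrentFrameIndex(allocator, frameIndex); // Call once per frame.
//   if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
//   {
//       // Allocation became lost: its memory has been reused. Recreate the resource.
//   }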
11785 
11786 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11787 {
11788  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11789  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11790  {
11791  const VmaSuballocation& suballoc = suballocations1st[i];
11792  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11793  {
11794  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11795  {
11796  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11797  return VK_ERROR_VALIDATION_FAILED_EXT;
11798  }
11799  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11800  {
11801  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11802  return VK_ERROR_VALIDATION_FAILED_EXT;
11803  }
11804  }
11805  }
11806 
11807  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11808  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11809  {
11810  const VmaSuballocation& suballoc = suballocations2nd[i];
11811  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11812  {
11813  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11814  {
11815  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11816  return VK_ERROR_VALIDATION_FAILED_EXT;
11817  }
11818  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11819  {
11820  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11821  return VK_ERROR_VALIDATION_FAILED_EXT;
11822  }
11823  }
11824  }
11825 
11826  return VK_SUCCESS;
11827 }
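// CheckCorruption() only has magic values to verify when margins are compiled in.
// A sketch of enabling this (the defines must precede the VMA_IMPLEMENTATION include):
//
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_DEBUG_DETECT_CORRUPTION 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//   ...
//   VkResult res = vmaCheckPoolCorruption(allocator, pool);
//   // Returns VK_ERROR_FEATURE_NOT_PRESENT if corruption detection was not enabled.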
11828 
11829 void VmaBlockMetadata_Linear::Alloc(
11830  const VmaAllocationRequest& request,
11831  VmaSuballocationType type,
11832  VkDeviceSize allocSize,
11833  VmaAllocation hAllocation)
11834 {
11835  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11836 
11837  switch(request.type)
11838  {
11839  case VmaAllocationRequestType::UpperAddress:
11840  {
11841  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11842  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11843  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11844  suballocations2nd.push_back(newSuballoc);
11845  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11846  }
11847  break;
11848  case VmaAllocationRequestType::EndOf1st:
11849  {
11850  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11851 
11852  VMA_ASSERT(suballocations1st.empty() ||
11853  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11854  // Check if it fits before the end of the block.
11855  VMA_ASSERT(request.offset + allocSize <= GetSize());
11856 
11857  suballocations1st.push_back(newSuballoc);
11858  }
11859  break;
11860  case VmaAllocationRequestType::EndOf2nd:
11861  {
11862  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11863  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11864  VMA_ASSERT(!suballocations1st.empty() &&
11865  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11866  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11867 
11868  switch(m_2ndVectorMode)
11869  {
11870  case SECOND_VECTOR_EMPTY:
11871  // First allocation from second part ring buffer.
11872  VMA_ASSERT(suballocations2nd.empty());
11873  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11874  break;
11875  case SECOND_VECTOR_RING_BUFFER:
11876  // 2-part ring buffer is already started.
11877  VMA_ASSERT(!suballocations2nd.empty());
11878  break;
11879  case SECOND_VECTOR_DOUBLE_STACK:
11880  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11881  break;
11882  default:
11883  VMA_ASSERT(0);
11884  }
11885 
11886  suballocations2nd.push_back(newSuballoc);
11887  }
11888  break;
11889  default:
11890  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11891  }
11892 
11893  m_SumFreeSize -= newSuballoc.size;
11894 }
11895 
11896 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11897 {
11898  FreeAtOffset(allocation->GetOffset());
11899 }
11900 
11901 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11902 {
11903  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11904  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11905 
11906  if(!suballocations1st.empty())
11907  {
11908  // If it's the first allocation in 1st vector: mark it free and extend the null-item prefix.
11909  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11910  if(firstSuballoc.offset == offset)
11911  {
11912  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11913  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11914  m_SumFreeSize += firstSuballoc.size;
11915  ++m_1stNullItemsBeginCount;
11916  CleanupAfterFree();
11917  return;
11918  }
11919  }
11920 
11921  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11922  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11923  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11924  {
11925  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11926  if(lastSuballoc.offset == offset)
11927  {
11928  m_SumFreeSize += lastSuballoc.size;
11929  suballocations2nd.pop_back();
11930  CleanupAfterFree();
11931  return;
11932  }
11933  }
11934  // Last allocation in 1st vector.
11935  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11936  {
11937  VmaSuballocation& lastSuballoc = suballocations1st.back();
11938  if(lastSuballoc.offset == offset)
11939  {
11940  m_SumFreeSize += lastSuballoc.size;
11941  suballocations1st.pop_back();
11942  CleanupAfterFree();
11943  return;
11944  }
11945  }
11946 
11947  // Item from the middle of 1st vector.
11948  {
11949  VmaSuballocation refSuballoc;
11950  refSuballoc.offset = offset;
11951  // The rest of the members stay intentionally uninitialized for better performance.
11952  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11953  suballocations1st.begin() + m_1stNullItemsBeginCount,
11954  suballocations1st.end(),
11955  refSuballoc,
11956  VmaSuballocationOffsetLess());
11957  if(it != suballocations1st.end())
11958  {
11959  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11960  it->hAllocation = VK_NULL_HANDLE;
11961  ++m_1stNullItemsMiddleCount;
11962  m_SumFreeSize += it->size;
11963  CleanupAfterFree();
11964  return;
11965  }
11966  }
11967 
11968  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11969  {
11970  // Item from the middle of 2nd vector.
11971  VmaSuballocation refSuballoc;
11972  refSuballoc.offset = offset;
11973  // The rest of the members stay intentionally uninitialized for better performance.
11974  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11975  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11976  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11977  if(it != suballocations2nd.end())
11978  {
11979  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11980  it->hAllocation = VK_NULL_HANDLE;
11981  ++m_2ndNullItemsCount;
11982  m_SumFreeSize += it->size;
11983  CleanupAfterFree();
11984  return;
11985  }
11986  }
11987 
11988  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11989 }
11990 
11991 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11992 {
11993  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11994  const size_t suballocCount = AccessSuballocations1st().size();
11995  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11996 }
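// Worked example of the heuristic above: with suballocCount = 100, the condition
// nullItemCount * 2 >= (100 - nullItemCount) * 3 first holds at nullItemCount = 60,
// so the 1st vector is compacted once at least 60% of its items are null.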
11997 
11998 void VmaBlockMetadata_Linear::CleanupAfterFree()
11999 {
12000  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
12001  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
12002 
12003  if(IsEmpty())
12004  {
12005  suballocations1st.clear();
12006  suballocations2nd.clear();
12007  m_1stNullItemsBeginCount = 0;
12008  m_1stNullItemsMiddleCount = 0;
12009  m_2ndNullItemsCount = 0;
12010  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12011  }
12012  else
12013  {
12014  const size_t suballoc1stCount = suballocations1st.size();
12015  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
12016  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
12017 
12018  // Find more null items at the beginning of 1st vector.
12019  while(m_1stNullItemsBeginCount < suballoc1stCount &&
12020  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
12021  {
12022  ++m_1stNullItemsBeginCount;
12023  --m_1stNullItemsMiddleCount;
12024  }
12025 
12026  // Find more null items at the end of 1st vector.
12027  while(m_1stNullItemsMiddleCount > 0 &&
12028  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
12029  {
12030  --m_1stNullItemsMiddleCount;
12031  suballocations1st.pop_back();
12032  }
12033 
12034  // Find more null items at the end of 2nd vector.
12035  while(m_2ndNullItemsCount > 0 &&
12036  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
12037  {
12038  --m_2ndNullItemsCount;
12039  suballocations2nd.pop_back();
12040  }
12041 
12042  // Find more null items at the beginning of 2nd vector.
12043  while(m_2ndNullItemsCount > 0 &&
12044  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
12045  {
12046  --m_2ndNullItemsCount;
12047  VmaVectorRemove(suballocations2nd, 0);
12048  }
12049 
12050  if(ShouldCompact1st())
12051  {
12052  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
12053  size_t srcIndex = m_1stNullItemsBeginCount;
12054  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
12055  {
12056  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
12057  {
12058  ++srcIndex;
12059  }
12060  if(dstIndex != srcIndex)
12061  {
12062  suballocations1st[dstIndex] = suballocations1st[srcIndex];
12063  }
12064  ++srcIndex;
12065  }
12066  suballocations1st.resize(nonNullItemCount);
12067  m_1stNullItemsBeginCount = 0;
12068  m_1stNullItemsMiddleCount = 0;
12069  }
12070 
12071  // 2nd vector became empty.
12072  if(suballocations2nd.empty())
12073  {
12074  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12075  }
12076 
12077  // 1st vector became empty.
12078  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
12079  {
12080  suballocations1st.clear();
12081  m_1stNullItemsBeginCount = 0;
12082 
12083  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
12084  {
12085  // Swap 1st with 2nd. Now 2nd is empty.
12086  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12087  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
12088  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
12089  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
12090  {
12091  ++m_1stNullItemsBeginCount;
12092  --m_1stNullItemsMiddleCount;
12093  }
12094  m_2ndNullItemsCount = 0;
12095  m_1stVectorIndex ^= 1;
12096  }
12097  }
12098  }
12099 
12100  VMA_HEAVY_ASSERT(Validate());
12101 }
12102 
12103 
12104 ////////////////////////////////////////////////////////////////////////////////
12105 // class VmaBlockMetadata_Buddy
12106 
12107 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
12108  VmaBlockMetadata(hAllocator),
12109  m_Root(VMA_NULL),
12110  m_AllocationCount(0),
12111  m_FreeCount(1),
12112  m_SumFreeSize(0)
12113 {
12114  memset(m_FreeList, 0, sizeof(m_FreeList));
12115 }
12116 
12117 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
12118 {
12119  DeleteNode(m_Root);
12120 }
12121 
12122 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
12123 {
12124  VmaBlockMetadata::Init(size);
12125 
12126  m_UsableSize = VmaPrevPow2(size);
12127  m_SumFreeSize = m_UsableSize;
12128 
12129  // Calculate m_LevelCount.
12130  m_LevelCount = 1;
12131  while(m_LevelCount < MAX_LEVELS &&
12132  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
12133  {
12134  ++m_LevelCount;
12135  }
12136 
12137  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
12138  rootNode->offset = 0;
12139  rootNode->type = Node::TYPE_FREE;
12140  rootNode->parent = VMA_NULL;
12141  rootNode->buddy = VMA_NULL;
12142 
12143  m_Root = rootNode;
12144  AddToFreeListFront(0, rootNode);
12145 }
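// A worked example of the level calculation above, assuming MIN_NODE_SIZE is
// 32 (the constant is defined for this class elsewhere in this file): for
// size = 1000, m_UsableSize = VmaPrevPow2(1000) = 512, and the loop accepts
// node sizes 512, 256, 128, 64, 32, giving m_LevelCount = 5. The remaining
// 1000 - 512 = 488 bytes are reported as unusable by GetUnusableSize().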
12146 
12147 bool VmaBlockMetadata_Buddy::Validate() const
12148 {
12149  // Validate tree.
12150  ValidationContext ctx;
12151  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
12152  {
12153  VMA_VALIDATE(false && "ValidateNode failed.");
12154  }
12155  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
12156  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
12157 
12158  // Validate free node lists.
12159  for(uint32_t level = 0; level < m_LevelCount; ++level)
12160  {
12161  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
12162  m_FreeList[level].front->free.prev == VMA_NULL);
12163 
12164  for(Node* node = m_FreeList[level].front;
12165  node != VMA_NULL;
12166  node = node->free.next)
12167  {
12168  VMA_VALIDATE(node->type == Node::TYPE_FREE);
12169 
12170  if(node->free.next == VMA_NULL)
12171  {
12172  VMA_VALIDATE(m_FreeList[level].back == node);
12173  }
12174  else
12175  {
12176  VMA_VALIDATE(node->free.next->free.prev == node);
12177  }
12178  }
12179  }
12180 
12181  // Validate that free lists at higher levels are empty.
12182  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
12183  {
12184  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
12185  }
12186 
12187  return true;
12188 }
12189 
12190 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
12191 {
12192  for(uint32_t level = 0; level < m_LevelCount; ++level)
12193  {
12194  if(m_FreeList[level].front != VMA_NULL)
12195  {
12196  return LevelToNodeSize(level);
12197  }
12198  }
12199  return 0;
12200 }
12201 
12202 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
12203 {
12204  const VkDeviceSize unusableSize = GetUnusableSize();
12205 
12206  outInfo.blockCount = 1;
12207 
12208  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
12209  outInfo.usedBytes = outInfo.unusedBytes = 0;
12210 
12211  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
12212  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
12213  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
12214 
12215  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
12216 
12217  if(unusableSize > 0)
12218  {
12219  ++outInfo.unusedRangeCount;
12220  outInfo.unusedBytes += unusableSize;
12221  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
12222  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
12223  }
12224 }
12225 
12226 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
12227 {
12228  const VkDeviceSize unusableSize = GetUnusableSize();
12229 
12230  inoutStats.size += GetSize();
12231  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
12232  inoutStats.allocationCount += m_AllocationCount;
12233  inoutStats.unusedRangeCount += m_FreeCount;
12234  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
12235 
12236  if(unusableSize > 0)
12237  {
12238  ++inoutStats.unusedRangeCount;
12239  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
12240  }
12241 }
12242 
12243 #if VMA_STATS_STRING_ENABLED
12244 
12245 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
12246 {
12247  // TODO optimize
12248  VmaStatInfo stat;
12249  CalcAllocationStatInfo(stat);
12250 
12251  PrintDetailedMap_Begin(
12252  json,
12253  stat.unusedBytes,
12254  stat.allocationCount,
12255  stat.unusedRangeCount);
12256 
12257  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
12258 
12259  const VkDeviceSize unusableSize = GetUnusableSize();
12260  if(unusableSize > 0)
12261  {
12262  PrintDetailedMap_UnusedRange(json,
12263  m_UsableSize, // offset
12264  unusableSize); // size
12265  }
12266 
12267  PrintDetailedMap_End(json);
12268 }
12269 
12270 #endif // #if VMA_STATS_STRING_ENABLED
12271 
12272 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
12273  uint32_t currentFrameIndex,
12274  uint32_t frameInUseCount,
12275  VkDeviceSize bufferImageGranularity,
12276  VkDeviceSize allocSize,
12277  VkDeviceSize allocAlignment,
12278  bool upperAddress,
12279  VmaSuballocationType allocType,
12280  bool canMakeOtherLost,
12281  uint32_t strategy,
12282  VmaAllocationRequest* pAllocationRequest)
12283 {
12284  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
12285 
12286  // Simple way to respect bufferImageGranularity. May be optimized some day.
12287  // Whenever the allocation might contain an OPTIMAL image, pad alignment and size up to the granularity:
12288  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
12289  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
12290  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
12291  {
12292  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
12293  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
12294  }
12295 
12296  if(allocSize > m_UsableSize)
12297  {
12298  return false;
12299  }
12300 
12301  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12302  for(uint32_t level = targetLevel + 1; level--; )
12303  {
12304  for(Node* freeNode = m_FreeList[level].front;
12305  freeNode != VMA_NULL;
12306  freeNode = freeNode->free.next)
12307  {
12308  if(freeNode->offset % allocAlignment == 0)
12309  {
12310  pAllocationRequest->type = VmaAllocationRequestType::Normal;
12311  pAllocationRequest->offset = freeNode->offset;
12312  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
12313  pAllocationRequest->sumItemSize = 0;
12314  pAllocationRequest->itemsToMakeLostCount = 0;
12315  pAllocationRequest->customData = (void*)(uintptr_t)level;
12316  return true;
12317  }
12318  }
12319  }
12320 
12321  return false;
12322 }
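// A worked example of the search above (illustrative numbers): for
// allocSize = 48 in a 512-byte block, AllocSizeToLevel returns the level of
// 64-byte nodes - the smallest that fit. The loop then walks from that level
// back toward the root, so a suitably aligned free 64-byte node wins first,
// but a free 128-, 256- or 512-byte node is also accepted and split down to
// the target size later, in Alloc().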
12323 
12324 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
12325  uint32_t currentFrameIndex,
12326  uint32_t frameInUseCount,
12327  VmaAllocationRequest* pAllocationRequest)
12328 {
12329  /*
12330  Lost allocations are not supported in buddy allocator at the moment.
12331  Support might be added in the future.
12332  */
12333  return pAllocationRequest->itemsToMakeLostCount == 0;
12334 }
12335 
12336 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
12337 {
12338  /*
12339  Lost allocations are not supported in buddy allocator at the moment.
12340  Support might be added in the future.
12341  */
12342  return 0;
12343 }
12344 
12345 void VmaBlockMetadata_Buddy::Alloc(
12346  const VmaAllocationRequest& request,
12347  VmaSuballocationType type,
12348  VkDeviceSize allocSize,
12349  VmaAllocation hAllocation)
12350 {
12351  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
12352 
12353  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12354  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
12355 
12356  Node* currNode = m_FreeList[currLevel].front;
12357  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12358  while(currNode->offset != request.offset)
12359  {
12360  currNode = currNode->free.next;
12361  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12362  }
12363 
12364  // Go down, splitting free nodes.
12365  while(currLevel < targetLevel)
12366  {
12367  // currNode is already first free node at currLevel.
12368  // Remove it from list of free nodes at this currLevel.
12369  RemoveFromFreeList(currLevel, currNode);
12370 
12371  const uint32_t childrenLevel = currLevel + 1;
12372 
12373  // Create two free sub-nodes.
12374  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
12375  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
12376 
12377  leftChild->offset = currNode->offset;
12378  leftChild->type = Node::TYPE_FREE;
12379  leftChild->parent = currNode;
12380  leftChild->buddy = rightChild;
12381 
12382  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
12383  rightChild->type = Node::TYPE_FREE;
12384  rightChild->parent = currNode;
12385  rightChild->buddy = leftChild;
12386 
12387  // Convert current currNode to split type.
12388  currNode->type = Node::TYPE_SPLIT;
12389  currNode->split.leftChild = leftChild;
12390 
12391  // Add child nodes to free list. Order is important!
12392  AddToFreeListFront(childrenLevel, rightChild);
12393  AddToFreeListFront(childrenLevel, leftChild);
12394 
12395  ++m_FreeCount;
12396  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
12397  ++currLevel;
12398  currNode = m_FreeList[currLevel].front;
12399 
12400  /*
12401  We can be sure that currNode, as the left child of the node previously split,
12402  also fulfills the alignment requirement.
12403  */
12404  }
12405 
12406  // Remove from free list.
12407  VMA_ASSERT(currLevel == targetLevel &&
12408  currNode != VMA_NULL &&
12409  currNode->type == Node::TYPE_FREE);
12410  RemoveFromFreeList(currLevel, currNode);
12411 
12412  // Convert to allocation node.
12413  currNode->type = Node::TYPE_ALLOCATION;
12414  currNode->allocation.alloc = hAllocation;
12415 
12416  ++m_AllocationCount;
12417  --m_FreeCount;
12418  m_SumFreeSize -= allocSize;
12419 }
12420 
12421 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
12422 {
12423  if(node->type == Node::TYPE_SPLIT)
12424  {
12425  DeleteNode(node->split.leftChild->buddy);
12426  DeleteNode(node->split.leftChild);
12427  }
12428 
12429  vma_delete(GetAllocationCallbacks(), node);
12430 }
12431 
12432 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
12433 {
12434  VMA_VALIDATE(level < m_LevelCount);
12435  VMA_VALIDATE(curr->parent == parent);
12436  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
12437  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
12438  switch(curr->type)
12439  {
12440  case Node::TYPE_FREE:
12441  // curr->free.prev, next are validated separately.
12442  ctx.calculatedSumFreeSize += levelNodeSize;
12443  ++ctx.calculatedFreeCount;
12444  break;
12445  case Node::TYPE_ALLOCATION:
12446  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
12447  ++ctx.calculatedAllocationCount;
12448  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
12449  break;
12450  case Node::TYPE_SPLIT:
12451  {
12452  const uint32_t childrenLevel = level + 1;
12453  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
12454  const Node* const leftChild = curr->split.leftChild;
12455  VMA_VALIDATE(leftChild != VMA_NULL);
12456  VMA_VALIDATE(leftChild->offset == curr->offset);
12457  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
12458  {
12459  VMA_VALIDATE(false && "ValidateNode for left child failed.");
12460  }
12461  const Node* const rightChild = leftChild->buddy;
12462  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
12463  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
12464  {
12465  VMA_VALIDATE(false && "ValidateNode for right child failed.");
12466  }
12467  }
12468  break;
12469  default:
12470  return false;
12471  }
12472 
12473  return true;
12474 }
12475 
12476 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
12477 {
12478  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
12479  uint32_t level = 0;
12480  VkDeviceSize currLevelNodeSize = m_UsableSize;
12481  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
12482  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
12483  {
12484  ++level;
12485  currLevelNodeSize = nextLevelNodeSize;
12486  nextLevelNodeSize = currLevelNodeSize >> 1;
12487  }
12488  return level;
12489 }
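// A worked example (illustrative numbers): with m_UsableSize = 256 and
// allocSize = 40, the loop runs 40 <= 128 -> level 1, 40 <= 64 -> level 2,
// then stops because 40 <= 32 fails, returning level 2 (64-byte nodes) -
// the deepest level whose nodes still hold the allocation.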
12490 
12491 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
12492 {
12493  // Find node and level.
12494  Node* node = m_Root;
12495  VkDeviceSize nodeOffset = 0;
12496  uint32_t level = 0;
12497  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
12498  while(node->type == Node::TYPE_SPLIT)
12499  {
12500  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
12501  if(offset < nodeOffset + nextLevelSize)
12502  {
12503  node = node->split.leftChild;
12504  }
12505  else
12506  {
12507  node = node->split.leftChild->buddy;
12508  nodeOffset += nextLevelSize;
12509  }
12510  ++level;
12511  levelNodeSize = nextLevelSize;
12512  }
12513 
12514  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
12515  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
12516 
12517  ++m_FreeCount;
12518  --m_AllocationCount;
12519  m_SumFreeSize += alloc->GetSize();
12520 
12521  node->type = Node::TYPE_FREE;
12522 
12523  // Join free nodes if possible.
12524  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
12525  {
12526  RemoveFromFreeList(level, node->buddy);
12527  Node* const parent = node->parent;
12528 
12529  vma_delete(GetAllocationCallbacks(), node->buddy);
12530  vma_delete(GetAllocationCallbacks(), node);
12531  parent->type = Node::TYPE_FREE;
12532 
12533  node = parent;
12534  --level;
12535  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
12536  --m_FreeCount;
12537  }
12538 
12539  AddToFreeListFront(level, node);
12540 }
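// An illustrative walk through the merge loop above: freeing a 64-byte node
// whose buddy is already free removes that buddy from its free list, deletes
// both children, and marks their 128-byte parent free; if the parent's buddy
// is free as well, the merge repeats one level up. Each iteration replaces
// two free nodes with one, hence --m_FreeCount per merge.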
12541 
12542 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
12543 {
12544  switch(node->type)
12545  {
12546  case Node::TYPE_FREE:
12547  ++outInfo.unusedRangeCount;
12548  outInfo.unusedBytes += levelNodeSize;
12549  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
12550  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
12551  break;
12552  case Node::TYPE_ALLOCATION:
12553  {
12554  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12555  ++outInfo.allocationCount;
12556  outInfo.usedBytes += allocSize;
12557  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
12558  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
12559 
12560  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
12561  if(unusedRangeSize > 0)
12562  {
12563  ++outInfo.unusedRangeCount;
12564  outInfo.unusedBytes += unusedRangeSize;
12565  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
12566  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
12567  }
12568  }
12569  break;
12570  case Node::TYPE_SPLIT:
12571  {
12572  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12573  const Node* const leftChild = node->split.leftChild;
12574  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12575  const Node* const rightChild = leftChild->buddy;
12576  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12577  }
12578  break;
12579  default:
12580  VMA_ASSERT(0);
12581  }
12582 }
12583 
12584 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12585 {
12586  VMA_ASSERT(node->type == Node::TYPE_FREE);
12587 
12588  // List is empty.
12589  Node* const frontNode = m_FreeList[level].front;
12590  if(frontNode == VMA_NULL)
12591  {
12592  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12593  node->free.prev = node->free.next = VMA_NULL;
12594  m_FreeList[level].front = m_FreeList[level].back = node;
12595  }
12596  else
12597  {
12598  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12599  node->free.prev = VMA_NULL;
12600  node->free.next = frontNode;
12601  frontNode->free.prev = node;
12602  m_FreeList[level].front = node;
12603  }
12604 }
12605 
12606 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12607 {
12608  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12609 
12610  // It is at the front.
12611  if(node->free.prev == VMA_NULL)
12612  {
12613  VMA_ASSERT(m_FreeList[level].front == node);
12614  m_FreeList[level].front = node->free.next;
12615  }
12616  else
12617  {
12618  Node* const prevFreeNode = node->free.prev;
12619  VMA_ASSERT(prevFreeNode->free.next == node);
12620  prevFreeNode->free.next = node->free.next;
12621  }
12622 
12623  // It is at the back.
12624  if(node->free.next == VMA_NULL)
12625  {
12626  VMA_ASSERT(m_FreeList[level].back == node);
12627  m_FreeList[level].back = node->free.prev;
12628  }
12629  else
12630  {
12631  Node* const nextFreeNode = node->free.next;
12632  VMA_ASSERT(nextFreeNode->free.prev == node);
12633  nextFreeNode->free.prev = node->free.prev;
12634  }
12635 }
12636 
12637 #if VMA_STATS_STRING_ENABLED
12638 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12639 {
12640  switch(node->type)
12641  {
12642  case Node::TYPE_FREE:
12643  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12644  break;
12645  case Node::TYPE_ALLOCATION:
12646  {
12647  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12648  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12649  if(allocSize < levelNodeSize)
12650  {
12651  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12652  }
12653  }
12654  break;
12655  case Node::TYPE_SPLIT:
12656  {
12657  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12658  const Node* const leftChild = node->split.leftChild;
12659  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12660  const Node* const rightChild = leftChild->buddy;
12661  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12662  }
12663  break;
12664  default:
12665  VMA_ASSERT(0);
12666  }
12667 }
12668 #endif // #if VMA_STATS_STRING_ENABLED
12669 
12670 
12671 ////////////////////////////////////////////////////////////////////////////////
12672 // class VmaDeviceMemoryBlock
12673 
12674 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12675  m_pMetadata(VMA_NULL),
12676  m_MemoryTypeIndex(UINT32_MAX),
12677  m_Id(0),
12678  m_hMemory(VK_NULL_HANDLE),
12679  m_MapCount(0),
12680  m_pMappedData(VMA_NULL)
12681 {
12682 }
12683 
12684 void VmaDeviceMemoryBlock::Init(
12685  VmaAllocator hAllocator,
12686  VmaPool hParentPool,
12687  uint32_t newMemoryTypeIndex,
12688  VkDeviceMemory newMemory,
12689  VkDeviceSize newSize,
12690  uint32_t id,
12691  uint32_t algorithm)
12692 {
12693  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12694 
12695  m_hParentPool = hParentPool;
12696  m_MemoryTypeIndex = newMemoryTypeIndex;
12697  m_Id = id;
12698  m_hMemory = newMemory;
12699 
12700  switch(algorithm)
12701  {
12702  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
12703  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
12704  break;
12705  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
12706  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
12707  break;
12708  default:
12709  VMA_ASSERT(0);
12710  // Fall-through.
12711  case 0:
12712  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12713  }
12714  m_pMetadata->Init(newSize);
12715 }
12716 
12717 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12718 {
12719  // This is the most important assert in the entire library.
12720  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12721  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12722 
12723  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12724  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12725  m_hMemory = VK_NULL_HANDLE;
12726 
12727  vma_delete(allocator, m_pMetadata);
12728  m_pMetadata = VMA_NULL;
12729 }
12730 
12731 bool VmaDeviceMemoryBlock::Validate() const
12732 {
12733  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12734  (m_pMetadata->GetSize() != 0));
12735 
12736  return m_pMetadata->Validate();
12737 }
12738 
12739 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12740 {
12741  void* pData = nullptr;
12742  VkResult res = Map(hAllocator, 1, &pData);
12743  if(res != VK_SUCCESS)
12744  {
12745  return res;
12746  }
12747 
12748  res = m_pMetadata->CheckCorruption(pData);
12749 
12750  Unmap(hAllocator, 1);
12751 
12752  return res;
12753 }
12754 
12755 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12756 {
12757  if(count == 0)
12758  {
12759  return VK_SUCCESS;
12760  }
12761 
12762  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12763  if(m_MapCount != 0)
12764  {
12765  m_MapCount += count;
12766  VMA_ASSERT(m_pMappedData != VMA_NULL);
12767  if(ppData != VMA_NULL)
12768  {
12769  *ppData = m_pMappedData;
12770  }
12771  return VK_SUCCESS;
12772  }
12773  else
12774  {
12775  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12776  hAllocator->m_hDevice,
12777  m_hMemory,
12778  0, // offset
12779  VK_WHOLE_SIZE,
12780  0, // flags
12781  &m_pMappedData);
12782  if(result == VK_SUCCESS)
12783  {
12784  if(ppData != VMA_NULL)
12785  {
12786  *ppData = m_pMappedData;
12787  }
12788  m_MapCount = count;
12789  }
12790  return result;
12791  }
12792 }
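/* A minimal usage sketch of the reference-counted mapping above, through the
public API (vmaMapMemory / vmaUnmapMemory); `allocator`, `alloc`, `srcData`
and `srcSize` are assumed to exist and be valid. Nested map/unmap pairs on
allocations in the same block call vkMapMemory/vkUnmapMemory only for the
outermost pair.

    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, srcData, (size_t)srcSize);
        vmaUnmapMemory(allocator, alloc); // Decrements the block's map count.
    }
*/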
12793 
12794 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12795 {
12796  if(count == 0)
12797  {
12798  return;
12799  }
12800 
12801  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12802  if(m_MapCount >= count)
12803  {
12804  m_MapCount -= count;
12805  if(m_MapCount == 0)
12806  {
12807  m_pMappedData = VMA_NULL;
12808  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12809  }
12810  }
12811  else
12812  {
12813  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12814  }
12815 }
12816 
12817 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12818 {
12819  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12820  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12821 
12822  void* pData;
12823  VkResult res = Map(hAllocator, 1, &pData);
12824  if(res != VK_SUCCESS)
12825  {
12826  return res;
12827  }
12828 
12829  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12830  VmaWriteMagicValue(pData, allocOffset + allocSize);
12831 
12832  Unmap(hAllocator, 1);
12833 
12834  return VK_SUCCESS;
12835 }
12836 
12837 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12838 {
12839  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12840  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12841 
12842  void* pData;
12843  VkResult res = Map(hAllocator, 1, &pData);
12844  if(res != VK_SUCCESS)
12845  {
12846  return res;
12847  }
12848 
12849  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12850  {
12851  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12852  }
12853  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12854  {
12855  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12856  }
12857 
12858  Unmap(hAllocator, 1);
12859 
12860  return VK_SUCCESS;
12861 }
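// Layout sketch of the margins validated above - VMA_DEBUG_MARGIN bytes on
// each side of the allocation, filled with the 4-byte magic value:
//
//   ...[margin][ allocation: allocSize bytes ][margin]...
//      ^ allocOffset - VMA_DEBUG_MARGIN       ^ allocOffset + allocSize
//
// A corrupted leading margin is reported as corruption BEFORE the freed
// allocation; a corrupted trailing margin as corruption AFTER it.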
12862 
12863 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12864  const VmaAllocator hAllocator,
12865  const VmaAllocation hAllocation,
12866  VkDeviceSize allocationLocalOffset,
12867  VkBuffer hBuffer,
12868  const void* pNext)
12869 {
12870  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12871  hAllocation->GetBlock() == this);
12872  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12873  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12874  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12875  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12876  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12877  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12878 }
12879 
12880 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12881  const VmaAllocator hAllocator,
12882  const VmaAllocation hAllocation,
12883  VkDeviceSize allocationLocalOffset,
12884  VkImage hImage,
12885  const void* pNext)
12886 {
12887  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12888  hAllocation->GetBlock() == this);
12889  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12890  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12891  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12892  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12893  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12894  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12895 }
12896 
12897 static void InitStatInfo(VmaStatInfo& outInfo)
12898 {
12899  memset(&outInfo, 0, sizeof(outInfo));
12900  outInfo.allocationSizeMin = UINT64_MAX;
12901  outInfo.unusedRangeSizeMin = UINT64_MAX;
12902 }
12903 
12904 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12905 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12906 {
12907  inoutInfo.blockCount += srcInfo.blockCount;
12908  inoutInfo.allocationCount += srcInfo.allocationCount;
12909  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12910  inoutInfo.usedBytes += srcInfo.usedBytes;
12911  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12912  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12913  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12914  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12915  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12916 }
12917 
12918 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12919 {
12920  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12921  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12922  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12923  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12924 }
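// A worked example (illustrative numbers): usedBytes = 100 and
// allocationCount = 3 give allocationSizeAvg = VmaRoundDiv(100, 3) = 33,
// rounded to nearest rather than truncated; with a count of 0 the average
// is reported as 0 instead of dividing by zero.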
12925 
12926 VmaPool_T::VmaPool_T(
12927  VmaAllocator hAllocator,
12928  const VmaPoolCreateInfo& createInfo,
12929  VkDeviceSize preferredBlockSize) :
12930  m_BlockVector(
12931  hAllocator,
12932  this, // hParentPool
12933  createInfo.memoryTypeIndex,
12934  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12935  createInfo.minBlockCount,
12936  createInfo.maxBlockCount,
12937  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12938  createInfo.frameInUseCount,
12939  createInfo.blockSize != 0, // explicitBlockSize
12940  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
12941  createInfo.priority,
12942  VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
12943  createInfo.pMemoryAllocateNext),
12944  m_Id(0),
12945  m_Name(VMA_NULL)
12946 {
12947 }
12948 
12949 VmaPool_T::~VmaPool_T()
12950 {
12951  VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
12952 }
12953 
12954 void VmaPool_T::SetName(const char* pName)
12955 {
12956  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12957  VmaFreeString(allocs, m_Name);
12958 
12959  if(pName != VMA_NULL)
12960  {
12961  m_Name = VmaCreateStringCopy(allocs, pName);
12962  }
12963  else
12964  {
12965  m_Name = VMA_NULL;
12966  }
12967 }
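/* A minimal usage sketch through the public API (vmaSetPoolName), which ends
up in the method above; `allocator` and `pool` are assumed to be valid
handles created elsewhere. The string is copied, so the caller may free or
reuse its buffer immediately; passing VMA_NULL clears the name.

    vmaSetPoolName(allocator, pool, "TexturePool");
    // ...
    vmaSetPoolName(allocator, pool, VMA_NULL); // Clears the stored copy.
*/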
12968 
12969 #if VMA_STATS_STRING_ENABLED
12970 
12971 #endif // #if VMA_STATS_STRING_ENABLED
12972 
12973 VmaBlockVector::VmaBlockVector(
12974  VmaAllocator hAllocator,
12975  VmaPool hParentPool,
12976  uint32_t memoryTypeIndex,
12977  VkDeviceSize preferredBlockSize,
12978  size_t minBlockCount,
12979  size_t maxBlockCount,
12980  VkDeviceSize bufferImageGranularity,
12981  uint32_t frameInUseCount,
12982  bool explicitBlockSize,
12983  uint32_t algorithm,
12984  float priority,
12985  VkDeviceSize minAllocationAlignment,
12986  void* pMemoryAllocateNext) :
12987  m_hAllocator(hAllocator),
12988  m_hParentPool(hParentPool),
12989  m_MemoryTypeIndex(memoryTypeIndex),
12990  m_PreferredBlockSize(preferredBlockSize),
12991  m_MinBlockCount(minBlockCount),
12992  m_MaxBlockCount(maxBlockCount),
12993  m_BufferImageGranularity(bufferImageGranularity),
12994  m_FrameInUseCount(frameInUseCount),
12995  m_ExplicitBlockSize(explicitBlockSize),
12996  m_Algorithm(algorithm),
12997  m_Priority(priority),
12998  m_MinAllocationAlignment(minAllocationAlignment),
12999  m_pMemoryAllocateNext(pMemoryAllocateNext),
13000  m_HasEmptyBlock(false),
13001  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
13002  m_NextBlockId(0)
13003 {
13004 }
13005 
13006 VmaBlockVector::~VmaBlockVector()
13007 {
13008  for(size_t i = m_Blocks.size(); i--; )
13009  {
13010  m_Blocks[i]->Destroy(m_hAllocator);
13011  vma_delete(m_hAllocator, m_Blocks[i]);
13012  }
13013 }
13014 
13015 VkResult VmaBlockVector::CreateMinBlocks()
13016 {
13017  for(size_t i = 0; i < m_MinBlockCount; ++i)
13018  {
13019  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
13020  if(res != VK_SUCCESS)
13021  {
13022  return res;
13023  }
13024  }
13025  return VK_SUCCESS;
13026 }
13027 
13028 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
13029 {
13030  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13031 
13032  const size_t blockCount = m_Blocks.size();
13033 
13034  pStats->size = 0;
13035  pStats->unusedSize = 0;
13036  pStats->allocationCount = 0;
13037  pStats->unusedRangeCount = 0;
13038  pStats->unusedRangeSizeMax = 0;
13039  pStats->blockCount = blockCount;
13040 
13041  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13042  {
13043  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13044  VMA_ASSERT(pBlock);
13045  VMA_HEAVY_ASSERT(pBlock->Validate());
13046  pBlock->m_pMetadata->AddPoolStats(*pStats);
13047  }
13048 }
13049 
13050 bool VmaBlockVector::IsEmpty()
13051 {
13052  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13053  return m_Blocks.empty();
13054 }
13055 
13056 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
13057 {
13058  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13059  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
13060  (VMA_DEBUG_MARGIN > 0) &&
13061  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
13062  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
13063 }
13064 
13065 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
13066 
13067 VkResult VmaBlockVector::Allocate(
13068  uint32_t currentFrameIndex,
13069  VkDeviceSize size,
13070  VkDeviceSize alignment,
13071  const VmaAllocationCreateInfo& createInfo,
13072  VmaSuballocationType suballocType,
13073  size_t allocationCount,
13074  VmaAllocation* pAllocations)
13075 {
13076  size_t allocIndex;
13077  VkResult res = VK_SUCCESS;
13078 
13079  alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
13080 
13081  if(IsCorruptionDetectionEnabled())
13082  {
13083  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
13084  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
13085  }
13086 
13087  {
13088  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13089  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13090  {
13091  res = AllocatePage(
13092  currentFrameIndex,
13093  size,
13094  alignment,
13095  createInfo,
13096  suballocType,
13097  pAllocations + allocIndex);
13098  if(res != VK_SUCCESS)
13099  {
13100  break;
13101  }
13102  }
13103  }
13104 
13105  if(res != VK_SUCCESS)
13106  {
13107  // Free all already created allocations.
13108  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13109  while(allocIndex--)
13110  {
13111  VmaAllocation_T* const alloc = pAllocations[allocIndex];
13112  const VkDeviceSize allocSize = alloc->GetSize();
13113  Free(alloc);
13114  m_hAllocator->m_Budget.RemoveAllocation(heapIndex, allocSize);
13115  }
13116  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
13117  }
13118 
13119  return res;
13120 }
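/* A minimal usage sketch of the multi-allocation path above through the
public API (vmaAllocateMemoryPages); `allocator`, `memReq` and
`allocCreateInfo` are assumed to be prepared elsewhere. Note the
all-or-nothing behavior implemented above: if any page fails, all pages
already created by this call are freed and the whole call fails.

    VmaAllocation allocs[8] = {};
    VkResult res = vmaAllocateMemoryPages(
        allocator, &memReq, &allocCreateInfo,
        8, allocs, VMA_NULL); // Pass VMA_NULL if per-page info is not needed.
*/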
13121 
13122 VkResult VmaBlockVector::AllocatePage(
13123  uint32_t currentFrameIndex,
13124  VkDeviceSize size,
13125  VkDeviceSize alignment,
13126  const VmaAllocationCreateInfo& createInfo,
13127  VmaSuballocationType suballocType,
13128  VmaAllocation* pAllocation)
13129 {
13130  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13131  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
13132  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13133  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13134 
13135  VkDeviceSize freeMemory;
13136  {
13137  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13138  VmaBudget heapBudget = {};
13139  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13140  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
13141  }
13142 
13143  const bool canFallbackToDedicated = !IsCustomPool();
13144  const bool canCreateNewBlock =
13145  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
13146  (m_Blocks.size() < m_MaxBlockCount) &&
13147  (freeMemory >= size || !canFallbackToDedicated);
13148  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
13149 
13150  // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
13151  // which in turn is available only when maxBlockCount = 1.
13152  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
13153  {
13154  canMakeOtherLost = false;
13155  }
13156 
13157  // Upper address can only be used with linear allocator and within single memory block.
13158  if(isUpperAddress &&
13159  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
13160  {
13161  return VK_ERROR_FEATURE_NOT_PRESENT;
13162  }
13163 
13164  // Validate strategy.
13165  switch(strategy)
13166  {
13167  case 0:
13168  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
13169  break;
13170  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
13171  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
13172  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
13173  break;
13174  default:
13175  return VK_ERROR_FEATURE_NOT_PRESENT;
13176  }
13177 
13178  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
13179  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
13180  {
13181  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13182  }
13183 
13184  /*
13185  Under certain conditions, this whole section can be skipped for optimization, so
13186  we move on directly to trying to allocate with canMakeOtherLost. That's the case
13187  e.g. for custom pools with linear algorithm.
13188  */
13189  if(!canMakeOtherLost || canCreateNewBlock)
13190  {
13191  // 1. Search existing allocations. Try to allocate without making other allocations lost.
13192  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
13193  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
13194 
13195  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13196  {
13197  // Use only last block.
13198  if(!m_Blocks.empty())
13199  {
13200  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
13201  VMA_ASSERT(pCurrBlock);
13202  VkResult res = AllocateFromBlock(
13203  pCurrBlock,
13204  currentFrameIndex,
13205  size,
13206  alignment,
13207  allocFlagsCopy,
13208  createInfo.pUserData,
13209  suballocType,
13210  strategy,
13211  pAllocation);
13212  if(res == VK_SUCCESS)
13213  {
13214  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
13215  return VK_SUCCESS;
13216  }
13217  }
13218  }
13219  else
13220  {
13221  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13222  {
13223  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13224  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13225  {
13226  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13227  VMA_ASSERT(pCurrBlock);
13228  VkResult res = AllocateFromBlock(
13229  pCurrBlock,
13230  currentFrameIndex,
13231  size,
13232  alignment,
13233  allocFlagsCopy,
13234  createInfo.pUserData,
13235  suballocType,
13236  strategy,
13237  pAllocation);
13238  if(res == VK_SUCCESS)
13239  {
13240  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
13241  return VK_SUCCESS;
13242  }
13243  }
13244  }
13245  else // WORST_FIT, FIRST_FIT
13246  {
13247  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13248  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13249  {
13250  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13251  VMA_ASSERT(pCurrBlock);
13252  VkResult res = AllocateFromBlock(
13253  pCurrBlock,
13254  currentFrameIndex,
13255  size,
13256  alignment,
13257  allocFlagsCopy,
13258  createInfo.pUserData,
13259  suballocType,
13260  strategy,
13261  pAllocation);
13262  if(res == VK_SUCCESS)
13263  {
13264  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
13265  return VK_SUCCESS;
13266  }
13267  }
13268  }
13269  }
13270 
13271  // 2. Try to create new block.
13272  if(canCreateNewBlock)
13273  {
13274  // Calculate optimal size for new block.
13275  VkDeviceSize newBlockSize = m_PreferredBlockSize;
13276  uint32_t newBlockSizeShift = 0;
13277  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
13278 
13279  if(!m_ExplicitBlockSize)
13280  {
13281  // Allocate 1/8, 1/4, 1/2 as first blocks.
13282  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
13283  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
13284  {
13285  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
13286  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
13287  {
13288  newBlockSize = smallerNewBlockSize;
13289  ++newBlockSizeShift;
13290  }
13291  else
13292  {
13293  break;
13294  }
13295  }
13296  }
13297 
13298  size_t newBlockIndex = 0;
13299  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
13300  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
13301  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
13302  if(!m_ExplicitBlockSize)
13303  {
13304  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
13305  {
13306  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
13307  if(smallerNewBlockSize >= size)
13308  {
13309  newBlockSize = smallerNewBlockSize;
13310  ++newBlockSizeShift;
13311  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
13312  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
13313  }
13314  else
13315  {
13316  break;
13317  }
13318  }
13319  }
13320 
13321  if(res == VK_SUCCESS)
13322  {
13323  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
13324  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
13325 
13326  res = AllocateFromBlock(
13327  pBlock,
13328  currentFrameIndex,
13329  size,
13330  alignment,
13331  allocFlagsCopy,
13332  createInfo.pUserData,
13333  suballocType,
13334  strategy,
13335  pAllocation);
13336  if(res == VK_SUCCESS)
13337  {
13338  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
13339  return VK_SUCCESS;
13340  }
13341  else
13342  {
13343  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
13344  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13345  }
13346  }
13347  }
13348  }
13349 
13350  // 3. Try to allocate from existing blocks with making other allocations lost.
13351  if(canMakeOtherLost)
13352  {
13353  uint32_t tryIndex = 0;
13354  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
13355  {
13356  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
13357  VmaAllocationRequest bestRequest = {};
13358  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
13359 
13360  // 1. Search existing allocations.
13361  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13362  {
13363  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13364  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13365  {
13366  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13367  VMA_ASSERT(pCurrBlock);
13368  VmaAllocationRequest currRequest = {};
13369  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13370  currentFrameIndex,
13371  m_FrameInUseCount,
13372  m_BufferImageGranularity,
13373  size,
13374  alignment,
13375  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13376  suballocType,
13377  canMakeOtherLost,
13378  strategy,
13379  &currRequest))
13380  {
13381  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13382  if(pBestRequestBlock == VMA_NULL ||
13383  currRequestCost < bestRequestCost)
13384  {
13385  pBestRequestBlock = pCurrBlock;
13386  bestRequest = currRequest;
13387  bestRequestCost = currRequestCost;
13388 
13389  if(bestRequestCost == 0)
13390  {
13391  break;
13392  }
13393  }
13394  }
13395  }
13396  }
13397  else // WORST_FIT, FIRST_FIT
13398  {
13399  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13400  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13401  {
13402  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13403  VMA_ASSERT(pCurrBlock);
13404  VmaAllocationRequest currRequest = {};
13405  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13406  currentFrameIndex,
13407  m_FrameInUseCount,
13408  m_BufferImageGranularity,
13409  size,
13410  alignment,
13411  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13412  suballocType,
13413  canMakeOtherLost,
13414  strategy,
13415  &currRequest))
13416  {
13417  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13418  if(pBestRequestBlock == VMA_NULL ||
13419  currRequestCost < bestRequestCost ||
13420  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13421  {
13422  pBestRequestBlock = pCurrBlock;
13423  bestRequest = currRequest;
13424  bestRequestCost = currRequestCost;
13425 
13426  if(bestRequestCost == 0 ||
13427  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13428  {
13429  break;
13430  }
13431  }
13432  }
13433  }
13434  }
13435 
13436  if(pBestRequestBlock != VMA_NULL)
13437  {
13438  if(mapped)
13439  {
13440  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
13441  if(res != VK_SUCCESS)
13442  {
13443  return res;
13444  }
13445  }
13446 
13447  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
13448  currentFrameIndex,
13449  m_FrameInUseCount,
13450  &bestRequest))
13451  {
13452  // Allocate from this pBlock.
13453  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13454  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
13455  UpdateHasEmptyBlock();
13456  (*pAllocation)->InitBlockAllocation(
13457  pBestRequestBlock,
13458  bestRequest.offset,
13459  alignment,
13460  size,
13461  m_MemoryTypeIndex,
13462  suballocType,
13463  mapped,
13464  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13465  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
13466  VMA_DEBUG_LOG(" Returned from existing block");
13467  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
13468  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13469  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13470  {
13471  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13472  }
13473  if(IsCorruptionDetectionEnabled())
13474  {
13475  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
13476  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13477  }
13478  return VK_SUCCESS;
13479  }
13480  // else: Some allocations must have been touched while we are here. Next try.
13481  }
13482  else
13483  {
13484  // Could not find place in any of the blocks - break outer loop.
13485  break;
13486  }
13487  }
13488  /* Maximum number of tries exceeded - a very unlikely event when many other
13489  threads are simultaneously touching allocations, making it impossible to make
13490  them lost at the same time as we try to allocate. */
13491  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
13492  {
13493  return VK_ERROR_TOO_MANY_OBJECTS;
13494  }
13495  }
13496 
13497  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13498 }
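// A worked example of the new-block sizing above (illustrative numbers):
// with m_PreferredBlockSize = 256 MiB, no existing blocks, and a 1 MiB
// request, the pre-sizing loop halves 256 -> 128 -> 64 -> 32 MiB (each
// candidate still >= 2x the request), so the block is created at 32 MiB.
// The shift budget NEW_BLOCK_SIZE_SHIFT_MAX = 3 is shared with the retry
// loop that halves again on vkAllocateMemory failure, so a block already
// pre-shrunk three times is not shrunk further on failure.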
13499 
13500 void VmaBlockVector::Free(
13501  const VmaAllocation hAllocation)
13502 {
13503  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
13504 
13505  bool budgetExceeded = false;
13506  {
13507  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13508  VmaBudget heapBudget = {};
13509  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13510  budgetExceeded = heapBudget.usage >= heapBudget.budget;
13511  }
13512 
13513  // Scope for lock.
13514  {
13515  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13516 
13517  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13518 
13519  if(IsCorruptionDetectionEnabled())
13520  {
13521  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
13522  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
13523  }
13524 
13525  if(hAllocation->IsPersistentMap())
13526  {
13527  pBlock->Unmap(m_hAllocator, 1);
13528  }
13529 
13530  pBlock->m_pMetadata->Free(hAllocation);
13531  VMA_HEAVY_ASSERT(pBlock->Validate());
13532 
13533  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
13534 
13535  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
13536  // pBlock became empty after this deallocation.
13537  if(pBlock->m_pMetadata->IsEmpty())
13538  {
13539  // Already has empty block. We don't want to have two, so delete this one.
13540  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
13541  {
13542  pBlockToDelete = pBlock;
13543  Remove(pBlock);
13544  }
13545  // else: We now have an empty block - leave it.
13546  }
13547  // pBlock didn't become empty, but we have another empty block - find and free that one.
13548  // (This is optional, heuristics.)
13549  else if(m_HasEmptyBlock && canDeleteBlock)
13550  {
13551  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
13552  if(pLastBlock->m_pMetadata->IsEmpty())
13553  {
13554  pBlockToDelete = pLastBlock;
13555  m_Blocks.pop_back();
13556  }
13557  }
13558 
13559  UpdateHasEmptyBlock();
13560  IncrementallySortBlocks();
13561  }
13562 
13563  // Destruction of a free block. Deferred until this point, outside of mutex
13564  // lock, for performance reasons.
13565  if(pBlockToDelete != VMA_NULL)
13566  {
13567  VMA_DEBUG_LOG(" Deleted empty block");
13568  pBlockToDelete->Destroy(m_hAllocator);
13569  vma_delete(m_hAllocator, pBlockToDelete);
13570  }
13571 }
13572 
13573 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
13574 {
13575  VkDeviceSize result = 0;
13576  for(size_t i = m_Blocks.size(); i--; )
13577  {
13578  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
13579  if(result >= m_PreferredBlockSize)
13580  {
13581  break;
13582  }
13583  }
13584  return result;
13585 }
13586 
13587 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13588 {
13589  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13590  {
13591  if(m_Blocks[blockIndex] == pBlock)
13592  {
13593  VmaVectorRemove(m_Blocks, blockIndex);
13594  return;
13595  }
13596  }
13597  VMA_ASSERT(0);
13598 }
13599 
13600 void VmaBlockVector::IncrementallySortBlocks()
13601 {
13602  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13603  {
13604  // Bubble sort only until first swap.
13605  for(size_t i = 1; i < m_Blocks.size(); ++i)
13606  {
13607  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13608  {
13609  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13610  return;
13611  }
13612  }
13613  }
13614 }
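// A worked example (illustrative numbers): with per-block free sizes
// [4, 16, 8, 32], one call swaps the first out-of-order pair and returns,
// yielding [4, 8, 16, 32]. Doing at most one swap per call keeps Free()
// cheap while repeated calls converge the vector to ascending free-size
// order - the order the best-fit scan in AllocatePage relies on.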
13615 
13616 VkResult VmaBlockVector::AllocateFromBlock(
13617  VmaDeviceMemoryBlock* pBlock,
13618  uint32_t currentFrameIndex,
13619  VkDeviceSize size,
13620  VkDeviceSize alignment,
13621  VmaAllocationCreateFlags allocFlags,
13622  void* pUserData,
13623  VmaSuballocationType suballocType,
13624  uint32_t strategy,
13625  VmaAllocation* pAllocation)
13626 {
13627  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13628  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13629  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13630  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13631 
13632  VmaAllocationRequest currRequest = {};
13633  if(pBlock->m_pMetadata->CreateAllocationRequest(
13634  currentFrameIndex,
13635  m_FrameInUseCount,
13636  m_BufferImageGranularity,
13637  size,
13638  alignment,
13639  isUpperAddress,
13640  suballocType,
13641  false, // canMakeOtherLost
13642  strategy,
13643  &currRequest))
13644  {
13645  // Allocate from pCurrBlock.
13646  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13647 
13648  if(mapped)
13649  {
13650  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13651  if(res != VK_SUCCESS)
13652  {
13653  return res;
13654  }
13655  }
13656 
13657  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13658  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13659  UpdateHasEmptyBlock();
13660  (*pAllocation)->InitBlockAllocation(
13661  pBlock,
13662  currRequest.offset,
13663  alignment,
13664  size,
13665  m_MemoryTypeIndex,
13666  suballocType,
13667  mapped,
13668  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13669  VMA_HEAVY_ASSERT(pBlock->Validate());
13670  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13671  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13672  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13673  {
13674  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13675  }
13676  if(IsCorruptionDetectionEnabled())
13677  {
13678  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13679  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13680  }
13681  return VK_SUCCESS;
13682  }
13683  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13684 }
13685 
13686 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13687 {
13688  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13689  allocInfo.pNext = m_pMemoryAllocateNext;
13690  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13691  allocInfo.allocationSize = blockSize;
13692 
13693 #if VMA_BUFFER_DEVICE_ADDRESS
13694  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13695  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13696  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13697  {
13698  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13699  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13700  }
13701 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13702 
13703 #if VMA_MEMORY_PRIORITY
13704  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
13705  if(m_hAllocator->m_UseExtMemoryPriority)
13706  {
13707  priorityInfo.priority = m_Priority;
13708  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
13709  }
13710 #endif // #if VMA_MEMORY_PRIORITY
13711 
13712 #if VMA_EXTERNAL_MEMORY
13713  // Attach VkExportMemoryAllocateInfoKHR if necessary.
13714  VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
13715  exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
13716  if(exportMemoryAllocInfo.handleTypes != 0)
13717  {
13718  VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
13719  }
13720 #endif // #if VMA_EXTERNAL_MEMORY
13721 
13722  VkDeviceMemory mem = VK_NULL_HANDLE;
13723  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13724  if(res < 0)
13725  {
13726  return res;
13727  }
13728 
13729  // New VkDeviceMemory successfully created.
13730 
13731  // Create new Allocation for it.
13732  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13733  pBlock->Init(
13734  m_hAllocator,
13735  m_hParentPool,
13736  m_MemoryTypeIndex,
13737  mem,
13738  allocInfo.allocationSize,
13739  m_NextBlockId++,
13740  m_Algorithm);
13741 
13742  m_Blocks.push_back(pBlock);
13743  if(pNewBlockIndex != VMA_NULL)
13744  {
13745  *pNewBlockIndex = m_Blocks.size() - 1;
13746  }
13747 
13748  return VK_SUCCESS;
13749 }
13750 
13751 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13752  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13753  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13754 {
13755  const size_t blockCount = m_Blocks.size();
13756  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13757 
13758  enum BLOCK_FLAG
13759  {
13760  BLOCK_FLAG_USED = 0x00000001,
13761  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13762  };
13763 
13764  struct BlockInfo
13765  {
13766  uint32_t flags;
13767  void* pMappedData;
13768  };
13769  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13770  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13771  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13772 
13773  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13774  const size_t moveCount = moves.size();
13775  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13776  {
13777  const VmaDefragmentationMove& move = moves[moveIndex];
13778  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13779  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13780  }
13781 
13782  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13783 
13784  // Go over all blocks. Get mapped pointer or map if necessary.
13785  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13786  {
13787  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13788  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13789  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13790  {
13791  currBlockInfo.pMappedData = pBlock->GetMappedData();
13792  // If it is not originally mapped, map it.
13793  if(currBlockInfo.pMappedData == VMA_NULL)
13794  {
13795  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13796  if(pDefragCtx->res == VK_SUCCESS)
13797  {
13798  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13799  }
13800  }
13801  }
13802  }
13803 
13804  // Go over all moves. Do actual data transfer.
13805  if(pDefragCtx->res == VK_SUCCESS)
13806  {
13807  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13808  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13809 
13810  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13811  {
13812  const VmaDefragmentationMove& move = moves[moveIndex];
13813 
13814  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13815  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13816 
13817  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13818 
13819  // Invalidate source.
13820  if(isNonCoherent)
13821  {
13822  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13823  memRange.memory = pSrcBlock->GetDeviceMemory();
13824  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13825  memRange.size = VMA_MIN(
13826  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13827  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13828  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13829  }
13830 
13831  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13832  memmove(
13833  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13834  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13835  static_cast<size_t>(move.size));
13836 
13837  if(IsCorruptionDetectionEnabled())
13838  {
13839  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13840  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13841  }
13842 
13843  // Flush destination.
13844  if(isNonCoherent)
13845  {
13846  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13847  memRange.memory = pDstBlock->GetDeviceMemory();
13848  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13849  memRange.size = VMA_MIN(
13850  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13851  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13852  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13853  }
13854  }
13855  }
13856 
13857  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
13858  // This runs regardless of whether pCtx->res == VK_SUCCESS.
13859  for(size_t blockIndex = blockCount; blockIndex--; )
13860  {
13861  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13862  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13863  {
13864  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13865  pBlock->Unmap(m_hAllocator, 1);
13866  }
13867  }
13868 }
13869 
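// GPU path of defragmentation: for every block touched by a move, a temporary VkBuffer
// spanning the whole block is created and bound at offset 0, and one vkCmdCopyBuffer
// region per move is recorded into the given command buffer. The context result is set
// to VK_NOT_READY to signal that the caller still has to submit and finish the command
// buffer before DefragmentationEnd() may destroy those temporary buffers.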
13870 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13871  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13872  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13873  VkCommandBuffer commandBuffer)
13874 {
13875  const size_t blockCount = m_Blocks.size();
13876 
13877  pDefragCtx->blockContexts.resize(blockCount);
13878  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13879 
13880  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13881  const size_t moveCount = moves.size();
13882  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13883  {
13884  const VmaDefragmentationMove& move = moves[moveIndex];
13885 
13886  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13887  {
13888  // Old-school moves still require us to map the whole block.
13889  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13890  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13891  }
13892  }
13893 
13894  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13895 
13896  // Go over all blocks. Create and bind buffer for whole block if necessary.
13897  {
13898  VkBufferCreateInfo bufCreateInfo;
13899  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13900 
13901  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13902  {
13903  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13904  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13905  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13906  {
13907  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13908  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13909  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13910  if(pDefragCtx->res == VK_SUCCESS)
13911  {
13912  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13913  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13914  }
13915  }
13916  }
13917  }
13918 
13919  // Go over all moves. Post data transfer commands to command buffer.
13920  if(pDefragCtx->res == VK_SUCCESS)
13921  {
13922  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13923  {
13924  const VmaDefragmentationMove& move = moves[moveIndex];
13925 
13926  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13927  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13928 
13929  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13930 
13931  VkBufferCopy region = {
13932  move.srcOffset,
13933  move.dstOffset,
13934  move.size };
13935  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13936  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13937  }
13938  }
13939 
13940  // Save buffers to defrag context for later destruction.
13941  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13942  {
13943  pDefragCtx->res = VK_NOT_READY;
13944  }
13945 }
13946 
13947 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13948 {
13949  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13950  {
13951  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13952  if(pBlock->m_pMetadata->IsEmpty())
13953  {
13954  if(m_Blocks.size() > m_MinBlockCount)
13955  {
13956  if(pDefragmentationStats != VMA_NULL)
13957  {
13958  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13959  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13960  }
13961 
13962  VmaVectorRemove(m_Blocks, blockIndex);
13963  pBlock->Destroy(m_hAllocator);
13964  vma_delete(m_hAllocator, pBlock);
13965  }
13966  else
13967  {
13968  break;
13969  }
13970  }
13971  }
13972  UpdateHasEmptyBlock();
13973 }
13974 
13975 void VmaBlockVector::UpdateHasEmptyBlock()
13976 {
13977  m_HasEmptyBlock = false;
13978  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13979  {
13980  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13981  if(pBlock->m_pMetadata->IsEmpty())
13982  {
13983  m_HasEmptyBlock = true;
13984  break;
13985  }
13986  }
13987 }
13988 
13989 #if VMA_STATS_STRING_ENABLED
13990 
13991 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13992 {
13993  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13994 
13995  json.BeginObject();
13996 
13997  if(IsCustomPool())
13998  {
13999  const char* poolName = m_hParentPool->GetName();
14000  if(poolName != VMA_NULL && poolName[0] != '\0')
14001  {
14002  json.WriteString("Name");
14003  json.WriteString(poolName);
14004  }
14005 
14006  json.WriteString("MemoryTypeIndex");
14007  json.WriteNumber(m_MemoryTypeIndex);
14008 
14009  json.WriteString("BlockSize");
14010  json.WriteNumber(m_PreferredBlockSize);
14011 
14012  json.WriteString("BlockCount");
14013  json.BeginObject(true);
14014  if(m_MinBlockCount > 0)
14015  {
14016  json.WriteString("Min");
14017  json.WriteNumber((uint64_t)m_MinBlockCount);
14018  }
14019  if(m_MaxBlockCount < SIZE_MAX)
14020  {
14021  json.WriteString("Max");
14022  json.WriteNumber((uint64_t)m_MaxBlockCount);
14023  }
14024  json.WriteString("Cur");
14025  json.WriteNumber((uint64_t)m_Blocks.size());
14026  json.EndObject();
14027 
14028  if(m_FrameInUseCount > 0)
14029  {
14030  json.WriteString("FrameInUseCount");
14031  json.WriteNumber(m_FrameInUseCount);
14032  }
14033 
14034  if(m_Algorithm != 0)
14035  {
14036  json.WriteString("Algorithm");
14037  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
14038  }
14039  }
14040  else
14041  {
14042  json.WriteString("PreferredBlockSize");
14043  json.WriteNumber(m_PreferredBlockSize);
14044  }
14045 
14046  json.WriteString("Blocks");
14047  json.BeginObject();
14048  for(size_t i = 0; i < m_Blocks.size(); ++i)
14049  {
14050  json.BeginString();
14051  json.ContinueString(m_Blocks[i]->GetId());
14052  json.EndString();
14053 
14054  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
14055  }
14056  json.EndObject();
14057 
14058  json.EndObject();
14059 }
14060 
14061 #endif // #if VMA_STATS_STRING_ENABLED
14062 
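// Entry point for defragmenting this block vector. The CPU path requires HOST_VISIBLE
// memory; the GPU path requires corruption detection to be off and the memory type to be
// enabled in GetGpuDefragmentationMemoryTypeBits(). When both are possible, the GPU is
// preferred for DEVICE_LOCAL memory or on integrated GPUs. For incremental
// defragmentation the write mutex is only try-locked, so a busy vector fails fast
// instead of blocking, and the computed moves are returned as a plan (VK_NOT_READY)
// rather than applied; otherwise the moves are applied immediately on the chosen path.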
14063 void VmaBlockVector::Defragment(
14064  class VmaBlockVectorDefragmentationContext* pCtx,
14065  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
14066  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
14067  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
14068  VkCommandBuffer commandBuffer)
14069 {
14070  pCtx->res = VK_SUCCESS;
14071 
14072  const VkMemoryPropertyFlags memPropFlags =
14073  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
14074  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
14075 
14076  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
14077  isHostVisible;
14078  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
14079  !IsCorruptionDetectionEnabled() &&
14080  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
14081 
14082  // There are options to defragment this memory type.
14083  if(canDefragmentOnCpu || canDefragmentOnGpu)
14084  {
14085  bool defragmentOnGpu;
14086  // There is only one option to defragment this memory type.
14087  if(canDefragmentOnGpu != canDefragmentOnCpu)
14088  {
14089  defragmentOnGpu = canDefragmentOnGpu;
14090  }
14091  // Both options are available: Heuristics to choose the best one.
14092  else
14093  {
14094  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
14095  m_hAllocator->IsIntegratedGpu();
14096  }
14097 
14098  bool overlappingMoveSupported = !defragmentOnGpu;
14099 
14100  if(m_hAllocator->m_UseMutex)
14101  {
14103  {
14104  if(!m_Mutex.TryLockWrite())
14105  {
14106  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
14107  return;
14108  }
14109  }
14110  else
14111  {
14112  m_Mutex.LockWrite();
14113  pCtx->mutexLocked = true;
14114  }
14115  }
14116 
14117  pCtx->Begin(overlappingMoveSupported, flags);
14118 
14119  // Defragment.
14120 
14121  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
14122  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
14123  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
14124 
14125  // Accumulate statistics.
14126  if(pStats != VMA_NULL)
14127  {
14128  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
14129  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
14130  pStats->bytesMoved += bytesMoved;
14131  pStats->allocationsMoved += allocationsMoved;
14132  VMA_ASSERT(bytesMoved <= maxBytesToMove);
14133  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
14134  if(defragmentOnGpu)
14135  {
14136  maxGpuBytesToMove -= bytesMoved;
14137  maxGpuAllocationsToMove -= allocationsMoved;
14138  }
14139  else
14140  {
14141  maxCpuBytesToMove -= bytesMoved;
14142  maxCpuAllocationsToMove -= allocationsMoved;
14143  }
14144  }
14145 
14146  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14147  {
14148  if(m_hAllocator->m_UseMutex)
14149  m_Mutex.UnlockWrite();
14150 
14151  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
14152  pCtx->res = VK_NOT_READY;
14153 
14154  return;
14155  }
14156 
14157  if(pCtx->res >= VK_SUCCESS)
14158  {
14159  if(defragmentOnGpu)
14160  {
14161  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
14162  }
14163  else
14164  {
14165  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
14166  }
14167  }
14168  }
14169 }
14170 
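// Finishes defragmentation on this block vector. In the incremental flow the write lock
// is re-acquired first, because the individual passes run without holding it. Temporary
// buffers created by ApplyDefragmentationMovesGpu() are destroyed, empty blocks are
// freed, and the mutex is released if this context had locked it.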
14171 void VmaBlockVector::DefragmentationEnd(
14172  class VmaBlockVectorDefragmentationContext* pCtx,
14173  uint32_t flags,
14174  VmaDefragmentationStats* pStats)
14175 {
14176  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
14177  {
14178  VMA_ASSERT(pCtx->mutexLocked == false);
14179 
14180  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
14181  // lock protecting us. Since we mutate state here, we have to take the lock now.
14182  m_Mutex.LockWrite();
14183  pCtx->mutexLocked = true;
14184  }
14185 
14186  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
14187  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
14188  {
14189  // Destroy buffers.
14190  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
14191  {
14192  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
14193  if(blockCtx.hBuffer)
14194  {
14195  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
14196  }
14197  }
14198 
14199  if(pCtx->res >= VK_SUCCESS)
14200  {
14201  FreeEmptyBlocks(pStats);
14202  }
14203  }
14204 
14205  if(pCtx->mutexLocked)
14206  {
14207  VMA_ASSERT(m_hAllocator->m_UseMutex);
14208  m_Mutex.UnlockWrite();
14209  }
14210 }
14211 
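// Hands out up to maxMoves entries of the precomputed move plan, writing one
// VmaDefragmentationPassMoveInfo (allocation, destination device memory, destination
// offset) per move and advancing defragmentationMovesProcessed. Returns the number of
// moves actually written to pMove.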
14212 uint32_t VmaBlockVector::ProcessDefragmentations(
14213  class VmaBlockVectorDefragmentationContext *pCtx,
14214  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
14215 {
14216  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14217 
14218  const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
14219 
14220  for(uint32_t i = 0; i < moveCount; ++ i)
14221  {
14222  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
14223 
14224  pMove->allocation = move.hAllocation;
14225  pMove->memory = move.pDstBlock->GetDeviceMemory();
14226  pMove->offset = move.dstOffset;
14227 
14228  ++ pMove;
14229  }
14230 
14231  pCtx->defragmentationMovesProcessed += moveCount;
14232 
14233  return moveCount;
14234 }
14235 
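// Commits every move that has been processed but not yet committed: the source
// suballocation is freed and the allocation is re-pointed at its destination block and
// offset. Blocks that became empty along the way are released afterwards.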
14236 void VmaBlockVector::CommitDefragmentations(
14237  class VmaBlockVectorDefragmentationContext *pCtx,
14238  VmaDefragmentationStats* pStats)
14239 {
14240  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14241 
14242  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
14243  {
14244  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
14245 
14246  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
14247  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
14248  }
14249 
14250  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
14251  FreeEmptyBlocks(pStats);
14252 }
14253 
14254 size_t VmaBlockVector::CalcAllocationCount() const
14255 {
14256  size_t result = 0;
14257  for(size_t i = 0; i < m_Blocks.size(); ++i)
14258  {
14259  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
14260  }
14261  return result;
14262 }
14263 
14264 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
14265 {
14266  if(m_BufferImageGranularity == 1)
14267  {
14268  return false;
14269  }
14270  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
14271  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
14272  {
14273  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
14274  VMA_ASSERT(m_Algorithm == 0);
14275  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
14276  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
14277  {
14278  return true;
14279  }
14280  }
14281  return false;
14282 }
14283 
14284 void VmaBlockVector::MakePoolAllocationsLost(
14285  uint32_t currentFrameIndex,
14286  size_t* pLostAllocationCount)
14287 {
14288  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14289  size_t lostAllocationCount = 0;
14290  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14291  {
14292  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14293  VMA_ASSERT(pBlock);
14294  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
14295  }
14296  if(pLostAllocationCount != VMA_NULL)
14297  {
14298  *pLostAllocationCount = lostAllocationCount;
14299  }
14300 }
14301 
14302 VkResult VmaBlockVector::CheckCorruption()
14303 {
14304  if(!IsCorruptionDetectionEnabled())
14305  {
14306  return VK_ERROR_FEATURE_NOT_PRESENT;
14307  }
14308 
14309  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
14310  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14311  {
14312  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14313  VMA_ASSERT(pBlock);
14314  VkResult res = pBlock->CheckCorruption(m_hAllocator);
14315  if(res != VK_SUCCESS)
14316  {
14317  return res;
14318  }
14319  }
14320  return VK_SUCCESS;
14321 }
14322 
14323 void VmaBlockVector::AddStats(VmaStats* pStats)
14324 {
14325  const uint32_t memTypeIndex = m_MemoryTypeIndex;
14326  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
14327 
14328  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
14329 
14330  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14331  {
14332  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14333  VMA_ASSERT(pBlock);
14334  VMA_HEAVY_ASSERT(pBlock->Validate());
14335  VmaStatInfo allocationStatInfo;
14336  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
14337  VmaAddStatInfo(pStats->total, allocationStatInfo);
14338  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14339  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14340  }
14341 }
14342 
14343 ////////////////////////////////////////////////////////////////////////////////
14344 // VmaDefragmentationAlgorithm_Generic members definition
14345 
14346 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
14347  VmaAllocator hAllocator,
14348  VmaBlockVector* pBlockVector,
14349  uint32_t currentFrameIndex,
14350  bool overlappingMoveSupported) :
14351  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14352  m_AllocationCount(0),
14353  m_AllAllocations(false),
14354  m_BytesMoved(0),
14355  m_AllocationsMoved(0),
14356  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
14357 {
14358  // Create block info for each block.
14359  const size_t blockCount = m_pBlockVector->m_Blocks.size();
14360  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14361  {
14362  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
14363  pBlockInfo->m_OriginalBlockIndex = blockIndex;
14364  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
14365  m_Blocks.push_back(pBlockInfo);
14366  }
14367 
14368  // Sort them by m_pBlock pointer value.
14369  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
14370 }
14371 
14372 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
14373 {
14374  for(size_t i = m_Blocks.size(); i--; )
14375  {
14376  vma_delete(m_hAllocator, m_Blocks[i]);
14377  }
14378 }
14379 
14380 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14381 {
14382  // Now that we are inside VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
14383  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
14384  {
14385  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
14386  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
14387  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
14388  {
14389  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
14390  (*it)->m_Allocations.push_back(allocInfo);
14391  }
14392  else
14393  {
14394  VMA_ASSERT(0);
14395  }
14396 
14397  ++m_AllocationCount;
14398  }
14399 }
14400 
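// One round of the generic algorithm. Blocks are visited from last to first (they were
// sorted from most "destination" to most "source") and each block's allocation list from
// back to front; for every allocation a new home is searched in the same or a preceding
// block via CreateAllocationRequest(), and the move is accepted only if MoveMakesSense()
// agrees and the byte/allocation budgets are not exceeded. With freeOldAllocations the
// move is additionally applied to the metadata right away.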
14401 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
14402  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14403  VkDeviceSize maxBytesToMove,
14404  uint32_t maxAllocationsToMove,
14405  bool freeOldAllocations)
14406 {
14407  if(m_Blocks.empty())
14408  {
14409  return VK_SUCCESS;
14410  }
14411 
14412  // This is a choice based on research.
14413  // Option 1:
14414  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
14415  // Option 2:
14416  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
14417  // Option 3:
14418  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
14419 
14420  size_t srcBlockMinIndex = 0;
14421  // When using FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
14422  /*
14423  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
14424  {
14425  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
14426  if(blocksWithNonMovableCount > 0)
14427  {
14428  srcBlockMinIndex = blocksWithNonMovableCount - 1;
14429  }
14430  }
14431  */
14432 
14433  size_t srcBlockIndex = m_Blocks.size() - 1;
14434  size_t srcAllocIndex = SIZE_MAX;
14435  for(;;)
14436  {
14437  // 1. Find next allocation to move.
14438  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
14439  // 1.2. Then start from last to first m_Allocations.
14440  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
14441  {
14442  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
14443  {
14444  // Finished: no more allocations to process.
14445  if(srcBlockIndex == srcBlockMinIndex)
14446  {
14447  return VK_SUCCESS;
14448  }
14449  else
14450  {
14451  --srcBlockIndex;
14452  srcAllocIndex = SIZE_MAX;
14453  }
14454  }
14455  else
14456  {
14457  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
14458  }
14459  }
14460 
14461  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
14462  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
14463 
14464  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
14465  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
14466  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
14467  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
14468 
14469  // 2. Try to find new place for this allocation in preceding or current block.
14470  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
14471  {
14472  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
14473  VmaAllocationRequest dstAllocRequest;
14474  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
14475  m_CurrentFrameIndex,
14476  m_pBlockVector->GetFrameInUseCount(),
14477  m_pBlockVector->GetBufferImageGranularity(),
14478  size,
14479  alignment,
14480  false, // upperAddress
14481  suballocType,
14482  false, // canMakeOtherLost
14483  strategy,
14484  &dstAllocRequest) &&
14485  MoveMakesSense(
14486  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
14487  {
14488  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
14489 
14490  // Reached limit on number of allocations or bytes to move.
14491  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
14492  (m_BytesMoved + size > maxBytesToMove))
14493  {
14494  return VK_SUCCESS;
14495  }
14496 
14497  VmaDefragmentationMove move = {};
14498  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
14499  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
14500  move.srcOffset = srcOffset;
14501  move.dstOffset = dstAllocRequest.offset;
14502  move.size = size;
14503  move.hAllocation = allocInfo.m_hAllocation;
14504  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
14505  move.pDstBlock = pDstBlockInfo->m_pBlock;
14506 
14507  moves.push_back(move);
14508 
14509  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
14510  dstAllocRequest,
14511  suballocType,
14512  size,
14513  allocInfo.m_hAllocation);
14514 
14515  if(freeOldAllocations)
14516  {
14517  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
14518  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
14519  }
14520 
14521  if(allocInfo.m_pChanged != VMA_NULL)
14522  {
14523  *allocInfo.m_pChanged = VK_TRUE;
14524  }
14525 
14526  ++m_AllocationsMoved;
14527  m_BytesMoved += size;
14528 
14529  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
14530 
14531  break;
14532  }
14533  }
14534 
14535  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
14536 
14537  if(srcAllocIndex > 0)
14538  {
14539  --srcAllocIndex;
14540  }
14541  else
14542  {
14543  if(srcBlockIndex > 0)
14544  {
14545  --srcBlockIndex;
14546  srcAllocIndex = SIZE_MAX;
14547  }
14548  else
14549  {
14550  return VK_SUCCESS;
14551  }
14552  }
14553  }
14554 }
14555 
14556 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
14557 {
14558  size_t result = 0;
14559  for(size_t i = 0; i < m_Blocks.size(); ++i)
14560  {
14561  if(m_Blocks[i]->m_HasNonMovableAllocations)
14562  {
14563  ++result;
14564  }
14565  }
14566  return result;
14567 }
14568 
14569 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
14570  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14571  VkDeviceSize maxBytesToMove,
14572  uint32_t maxAllocationsToMove,
14573  VmaDefragmentationFlags flags)
14574 {
14575  if(!m_AllAllocations && m_AllocationCount == 0)
14576  {
14577  return VK_SUCCESS;
14578  }
14579 
14580  const size_t blockCount = m_Blocks.size();
14581  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14582  {
14583  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
14584 
14585  if(m_AllAllocations)
14586  {
14587  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
14588  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
14589  it != pMetadata->m_Suballocations.end();
14590  ++it)
14591  {
14592  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
14593  {
14594  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
14595  pBlockInfo->m_Allocations.push_back(allocInfo);
14596  }
14597  }
14598  }
14599 
14600  pBlockInfo->CalcHasNonMovableAllocations();
14601 
14602  // This is a choice based on research.
14603  // Option 1:
14604  pBlockInfo->SortAllocationsByOffsetDescending();
14605  // Option 2:
14606  //pBlockInfo->SortAllocationsBySizeDescending();
14607  }
14608 
14609  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
14610  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14611 
14612  // This is a choice based on research.
14613  const uint32_t roundCount = 2;
14614 
14615  // Execute defragmentation rounds (the main part).
14616  VkResult result = VK_SUCCESS;
14617  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14618  {
14619  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14620  }
14621 
14622  return result;
14623 }
14624 
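// A move is considered worthwhile only if it transports data "forward": into a block
// with a lower index, or to a lower offset within the same block. Any other move could
// not help compaction.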
14625 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14626  size_t dstBlockIndex, VkDeviceSize dstOffset,
14627  size_t srcBlockIndex, VkDeviceSize srcOffset)
14628 {
14629  if(dstBlockIndex < srcBlockIndex)
14630  {
14631  return true;
14632  }
14633  if(dstBlockIndex > srcBlockIndex)
14634  {
14635  return false;
14636  }
14637  if(dstOffset < srcOffset)
14638  {
14639  return true;
14640  }
14641  return false;
14642 }
14643 
14644 ////////////////////////////////////////////////////////////////////////////////
14645 // VmaDefragmentationAlgorithm_Fast
14646 
14647 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14648  VmaAllocator hAllocator,
14649  VmaBlockVector* pBlockVector,
14650  uint32_t currentFrameIndex,
14651  bool overlappingMoveSupported) :
14652  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14653  m_OverlappingMoveSupported(overlappingMoveSupported),
14654  m_AllocationCount(0),
14655  m_AllAllocations(false),
14656  m_BytesMoved(0),
14657  m_AllocationsMoved(0),
14658  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14659 {
14660  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14661 
14662 }
14663 
14664 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14665 {
14666 }
14667 
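// The fast algorithm compacts all allocations front-to-back in a single sweep. It is
// selected by VmaBlockVectorDefragmentationContext::Begin() only when every allocation
// is movable, VMA_DEBUG_MARGIN is 0, no buffer/image granularity conflict is possible,
// and the defragmentation is not incremental. Blocks are processed in ascending order of
// free space, gaps that cannot be filled immediately are remembered in a
// FreeSpaceDatabase for later reuse, and PostprocessMetadata() rebuilds the metadata at
// the end.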
14668 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14669  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14670  VkDeviceSize maxBytesToMove,
14671  uint32_t maxAllocationsToMove,
14672  VmaDefragmentationFlags flags)
14673 {
14674  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14675 
14676  const size_t blockCount = m_pBlockVector->GetBlockCount();
14677  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14678  {
14679  return VK_SUCCESS;
14680  }
14681 
14682  PreprocessMetadata();
14683 
14684  // Sort blocks in ascending order of free space, i.e. from most "destination" (fullest) to most "source".
14685 
14686  m_BlockInfos.resize(blockCount);
14687  for(size_t i = 0; i < blockCount; ++i)
14688  {
14689  m_BlockInfos[i].origBlockIndex = i;
14690  }
14691 
14692  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14693  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14694  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14695  });
14696 
14697  // THE MAIN ALGORITHM
14698 
14699  FreeSpaceDatabase freeSpaceDb;
14700 
14701  size_t dstBlockInfoIndex = 0;
14702  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14703  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14704  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14705  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14706  VkDeviceSize dstOffset = 0;
14707 
14708  bool end = false;
14709  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14710  {
14711  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14712  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14713  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14714  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14715  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14716  {
14717  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14718  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14719  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14720  if(m_AllocationsMoved == maxAllocationsToMove ||
14721  m_BytesMoved + srcAllocSize > maxBytesToMove)
14722  {
14723  end = true;
14724  break;
14725  }
14726  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14727 
14728  VmaDefragmentationMove move = {};
14729  // Try to place it in one of free spaces from the database.
14730  size_t freeSpaceInfoIndex;
14731  VkDeviceSize dstAllocOffset;
14732  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14733  freeSpaceInfoIndex, dstAllocOffset))
14734  {
14735  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14736  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14737  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14738 
14739  // Same block
14740  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14741  {
14742  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14743 
14744  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14745 
14746  VmaSuballocation suballoc = *srcSuballocIt;
14747  suballoc.offset = dstAllocOffset;
14748  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14749  m_BytesMoved += srcAllocSize;
14750  ++m_AllocationsMoved;
14751 
14752  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14753  ++nextSuballocIt;
14754  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14755  srcSuballocIt = nextSuballocIt;
14756 
14757  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14758 
14759  move.srcBlockIndex = srcOrigBlockIndex;
14760  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14761  move.srcOffset = srcAllocOffset;
14762  move.dstOffset = dstAllocOffset;
14763  move.size = srcAllocSize;
14764 
14765  moves.push_back(move);
14766  }
14767  // Different block
14768  else
14769  {
14770  // MOVE OPTION 2: Move the allocation to a different block.
14771 
14772  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14773 
14774  VmaSuballocation suballoc = *srcSuballocIt;
14775  suballoc.offset = dstAllocOffset;
14776  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14777  m_BytesMoved += srcAllocSize;
14778  ++m_AllocationsMoved;
14779 
14780  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14781  ++nextSuballocIt;
14782  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14783  srcSuballocIt = nextSuballocIt;
14784 
14785  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14786 
14787  move.srcBlockIndex = srcOrigBlockIndex;
14788  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14789  move.srcOffset = srcAllocOffset;
14790  move.dstOffset = dstAllocOffset;
14791  move.size = srcAllocSize;
14792 
14793  moves.push_back(move);
14794  }
14795  }
14796  else
14797  {
14798  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14799 
14800  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
14801  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14802  dstAllocOffset + srcAllocSize > dstBlockSize)
14803  {
14804  // But before that, register remaining free space at the end of dst block.
14805  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14806 
14807  ++dstBlockInfoIndex;
14808  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14809  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14810  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14811  dstBlockSize = pDstMetadata->GetSize();
14812  dstOffset = 0;
14813  dstAllocOffset = 0;
14814  }
14815 
14816  // Same block
14817  if(dstBlockInfoIndex == srcBlockInfoIndex)
14818  {
14819  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14820 
14821  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14822 
14823  bool skipOver = overlap;
14824  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14825  {
14826  // If the destination and source places overlap, skip the move if it would shift
14827  // the allocation by less than 1/64 of its size.
14828  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14829  }
14830 
14831  if(skipOver)
14832  {
14833  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14834 
14835  dstOffset = srcAllocOffset + srcAllocSize;
14836  ++srcSuballocIt;
14837  }
14838  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14839  else
14840  {
14841  srcSuballocIt->offset = dstAllocOffset;
14842  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14843  dstOffset = dstAllocOffset + srcAllocSize;
14844  m_BytesMoved += srcAllocSize;
14845  ++m_AllocationsMoved;
14846  ++srcSuballocIt;
14847 
14848  move.srcBlockIndex = srcOrigBlockIndex;
14849  move.dstBlockIndex = dstOrigBlockIndex;
14850  move.srcOffset = srcAllocOffset;
14851  move.dstOffset = dstAllocOffset;
14852  move.size = srcAllocSize;
14853 
14854  moves.push_back(move);
14855  }
14856  }
14857  // Different block
14858  else
14859  {
14860  // MOVE OPTION 2: Move the allocation to a different block.
14861 
14862  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14863  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14864 
14865  VmaSuballocation suballoc = *srcSuballocIt;
14866  suballoc.offset = dstAllocOffset;
14867  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14868  dstOffset = dstAllocOffset + srcAllocSize;
14869  m_BytesMoved += srcAllocSize;
14870  ++m_AllocationsMoved;
14871 
14872  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14873  ++nextSuballocIt;
14874  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14875  srcSuballocIt = nextSuballocIt;
14876 
14877  pDstMetadata->m_Suballocations.push_back(suballoc);
14878 
14879  move.srcBlockIndex = srcOrigBlockIndex;
14880  move.dstBlockIndex = dstOrigBlockIndex;
14881  move.srcOffset = srcAllocOffset;
14882  move.dstOffset = dstAllocOffset;
14883  move.size = srcAllocSize;
14884 
14885  moves.push_back(move);
14886  }
14887  }
14888  }
14889  }
14890 
14891  m_BlockInfos.clear();
14892 
14893  PostprocessMetadata();
14894 
14895  return VK_SUCCESS;
14896 }
14897 
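// Removes all VMA_SUBALLOCATION_TYPE_FREE entries from every block's suballocation list
// and resets the free-space bookkeeping. This leaves the metadata deliberately
// inconsistent while the fast algorithm rewrites offsets; PostprocessMetadata()
// re-inserts the free gaps and restores the sorted free list afterwards.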
14898 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14899 {
14900  const size_t blockCount = m_pBlockVector->GetBlockCount();
14901  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14902  {
14903  VmaBlockMetadata_Generic* const pMetadata =
14904  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14905  pMetadata->m_FreeCount = 0;
14906  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14907  pMetadata->m_FreeSuballocationsBySize.clear();
14908  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14909  it != pMetadata->m_Suballocations.end(); )
14910  {
14911  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14912  {
14913  VmaSuballocationList::iterator nextIt = it;
14914  ++nextIt;
14915  pMetadata->m_Suballocations.erase(it);
14916  it = nextIt;
14917  }
14918  else
14919  {
14920  ++it;
14921  }
14922  }
14923  }
14924 }
14925 
14926 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14927 {
14928  const size_t blockCount = m_pBlockVector->GetBlockCount();
14929  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14930  {
14931  VmaBlockMetadata_Generic* const pMetadata =
14932  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14933  const VkDeviceSize blockSize = pMetadata->GetSize();
14934 
14935  // No allocations in this block - entire area is free.
14936  if(pMetadata->m_Suballocations.empty())
14937  {
14938  pMetadata->m_FreeCount = 1;
14939  //pMetadata->m_SumFreeSize is already set to blockSize.
14940  VmaSuballocation suballoc = {
14941  0, // offset
14942  blockSize, // size
14943  VMA_NULL, // hAllocation
14944  VMA_SUBALLOCATION_TYPE_FREE };
14945  pMetadata->m_Suballocations.push_back(suballoc);
14946  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14947  }
14948  // There are some allocations in this block.
14949  else
14950  {
14951  VkDeviceSize offset = 0;
14952  VmaSuballocationList::iterator it;
14953  for(it = pMetadata->m_Suballocations.begin();
14954  it != pMetadata->m_Suballocations.end();
14955  ++it)
14956  {
14957  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14958  VMA_ASSERT(it->offset >= offset);
14959 
14960  // Need to insert preceding free space.
14961  if(it->offset > offset)
14962  {
14963  ++pMetadata->m_FreeCount;
14964  const VkDeviceSize freeSize = it->offset - offset;
14965  VmaSuballocation suballoc = {
14966  offset, // offset
14967  freeSize, // size
14968  VMA_NULL, // hAllocation
14969  VMA_SUBALLOCATION_TYPE_FREE };
14970  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14971  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14972  {
14973  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14974  }
14975  }
14976 
14977  pMetadata->m_SumFreeSize -= it->size;
14978  offset = it->offset + it->size;
14979  }
14980 
14981  // Need to insert trailing free space.
14982  if(offset < blockSize)
14983  {
14984  ++pMetadata->m_FreeCount;
14985  const VkDeviceSize freeSize = blockSize - offset;
14986  VmaSuballocation suballoc = {
14987  offset, // offset
14988  freeSize, // size
14989  VMA_NULL, // hAllocation
14990  VMA_SUBALLOCATION_TYPE_FREE };
14991  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14992  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14993  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14994  {
14995  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14996  }
14997  }
14998 
14999  VMA_SORT(
15000  pMetadata->m_FreeSuballocationsBySize.begin(),
15001  pMetadata->m_FreeSuballocationsBySize.end(),
15002  VmaSuballocationItemSizeLess());
15003  }
15004 
15005  VMA_HEAVY_ASSERT(pMetadata->Validate());
15006  }
15007 }
15008 
15009 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
15010 {
15011  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
15012  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
15013  // Advance past every suballocation that starts before the new one,
15014  // so the insertion keeps the list sorted by offset.
15015  while(it != pMetadata->m_Suballocations.end() &&
15016  it->offset < suballoc.offset)
15017  {
15018  ++it;
15019  }
15020  pMetadata->m_Suballocations.insert(it, suballoc);
15021 }
15022 
15023 ////////////////////////////////////////////////////////////////////////////////
15024 // VmaBlockVectorDefragmentationContext
15025 
15026 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
15027  VmaAllocator hAllocator,
15028  VmaPool hCustomPool,
15029  VmaBlockVector* pBlockVector,
15030  uint32_t currFrameIndex) :
15031  res(VK_SUCCESS),
15032  mutexLocked(false),
15033  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
15034  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
15035  defragmentationMovesProcessed(0),
15036  defragmentationMovesCommitted(0),
15037  hasDefragmentationPlan(0),
15038  m_hAllocator(hAllocator),
15039  m_hCustomPool(hCustomPool),
15040  m_pBlockVector(pBlockVector),
15041  m_CurrFrameIndex(currFrameIndex),
15042  m_pAlgorithm(VMA_NULL),
15043  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
15044  m_AllAllocations(false)
15045 {
15046 }
15047 
15048 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
15049 {
15050  vma_delete(m_hAllocator, m_pAlgorithm);
15051 }
15052 
15053 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
15054 {
15055  AllocInfo info = { hAlloc, pChanged };
15056  m_Allocations.push_back(info);
15057 }
15058 
15059 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
15060 {
15061  const bool allAllocations = m_AllAllocations ||
15062  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
15063 
15064  /********************************
15065  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
15066  ********************************/
15067 
15068  /*
15069  Fast algorithm is supported only when certain criteria are met:
15070  - VMA_DEBUG_MARGIN is 0.
15071  - All allocations in this block vector are moveable.
15072  - There is no possibility of image/buffer granularity conflict.
15073  - The defragmentation is not incremental.
15074  */
15075  if(VMA_DEBUG_MARGIN == 0 &&
15076  allAllocations &&
15077  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
15078  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
15079  {
15080  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
15081  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
15082  }
15083  else
15084  {
15085  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
15086  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
15087  }
15088 
15089  if(allAllocations)
15090  {
15091  m_pAlgorithm->AddAll();
15092  }
15093  else
15094  {
15095  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
15096  {
15097  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
15098  }
15099  }
15100 }
15101 
15102 ////////////////////////////////////////////////////////////////////////////////
15103 // VmaDefragmentationContext
15104 
15105 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
15106  VmaAllocator hAllocator,
15107  uint32_t currFrameIndex,
15108  uint32_t flags,
15109  VmaDefragmentationStats* pStats) :
15110  m_hAllocator(hAllocator),
15111  m_CurrFrameIndex(currFrameIndex),
15112  m_Flags(flags),
15113  m_pStats(pStats),
15114  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
15115 {
15116  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
15117 }
15118 
15119 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
15120 {
15121  for(size_t i = m_CustomPoolContexts.size(); i--; )
15122  {
15123  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
15124  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
15125  vma_delete(m_hAllocator, pBlockVectorCtx);
15126  }
15127  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
15128  {
15129  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
15130  if(pBlockVectorCtx)
15131  {
15132  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
15133  vma_delete(m_hAllocator, pBlockVectorCtx);
15134  }
15135  }
15136 }
15137 
15138 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
15139 {
15140  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15141  {
15142  VmaPool pool = pPools[poolIndex];
15143  VMA_ASSERT(pool);
15144  // Pools with algorithm other than default are not defragmented.
15145  if(pool->m_BlockVector.GetAlgorithm() == 0)
15146  {
15147  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
15148 
15149  for(size_t i = m_CustomPoolContexts.size(); i--; )
15150  {
15151  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
15152  {
15153  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
15154  break;
15155  }
15156  }
15157 
15158  if(!pBlockVectorDefragCtx)
15159  {
15160  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15161  m_hAllocator,
15162  pool,
15163  &pool->m_BlockVector,
15164  m_CurrFrameIndex);
15165  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
15166  }
15167 
15168  pBlockVectorDefragCtx->AddAll();
15169  }
15170  }
15171 }
15172 
15173 void VmaDefragmentationContext_T::AddAllocations(
15174  uint32_t allocationCount,
15175  const VmaAllocation* pAllocations,
15176  VkBool32* pAllocationsChanged)
15177 {
15178  // Dispatch pAllocations among per-block-vector defragmentation contexts. Create them when necessary.
15179  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15180  {
15181  const VmaAllocation hAlloc = pAllocations[allocIndex];
15182  VMA_ASSERT(hAlloc);
15183  // DedicatedAlloc cannot be defragmented.
15184  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
15185  // Lost allocation cannot be defragmented.
15186  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
15187  {
15188  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
15189 
15190  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
15191  // This allocation belongs to custom pool.
15192  if(hAllocPool != VK_NULL_HANDLE)
15193  {
15194  // Pools with algorithm other than default are not defragmented.
15195  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
15196  {
15197  for(size_t i = m_CustomPoolContexts.size(); i--; )
15198  {
15199  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
15200  {
15201  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
15202  break;
15203  }
15204  }
15205  if(!pBlockVectorDefragCtx)
15206  {
15207  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15208  m_hAllocator,
15209  hAllocPool,
15210  &hAllocPool->m_BlockVector,
15211  m_CurrFrameIndex);
15212  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
15213  }
15214  }
15215  }
15216  // This allocation belongs to default pool.
15217  else
15218  {
15219  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
15220  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
15221  if(!pBlockVectorDefragCtx)
15222  {
15223  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15224  m_hAllocator,
15225  VMA_NULL, // hCustomPool
15226  m_hAllocator->m_pBlockVectors[memTypeIndex],
15227  m_CurrFrameIndex);
15228  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
15229  }
15230  }
15231 
15232  if(pBlockVectorDefragCtx)
15233  {
15234  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
15235  &pAllocationsChanged[allocIndex] : VMA_NULL;
15236  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
15237  }
15238  }
15239  }
15240 }
15241 
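// Top-level dispatch over all default-pool and custom-pool block vectors that received
// pools or allocations. For incremental defragmentation only the byte/allocation budgets
// are recorded here and VK_NOT_READY is returned; the real work happens in the
// subsequent passes. Otherwise every registered block vector is defragmented right away,
// with the GPU budget zeroed when no command buffer was provided.
//
// A rough sketch of the non-incremental, CPU-only flow through the public API
// (illustrative; `allocator` and `allocs` are placeholder names, error handling is
// omitted, and buffers/images bound to moved allocations must be recreated afterwards):
//
//     VmaDefragmentationInfo2 info = {};
//     info.allocationCount = (uint32_t)allocs.size();
//     info.pAllocations = allocs.data();
//     info.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     info.maxCpuAllocationsToMove = UINT32_MAX;
//     VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//     vmaDefragmentationBegin(allocator, &info, nullptr, &defragCtx);
//     vmaDefragmentationEnd(allocator, defragCtx);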
15242 VkResult VmaDefragmentationContext_T::Defragment(
15243  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
15244  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
15245  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
15246 {
15247  if(pStats)
15248  {
15249  memset(pStats, 0, sizeof(VmaDefragmentationStats));
15250  }
15251 
15252  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
15253  {
15254  // For incremental defragmentations, we just earmark how much we can move.
15255  // The real work happens in the subsequent defragmentation passes.
15256  m_MaxCpuBytesToMove = maxCpuBytesToMove;
15257  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
15258 
15259  m_MaxGpuBytesToMove = maxGpuBytesToMove;
15260  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
15261 
15262  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
15263  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
15264  return VK_SUCCESS;
15265 
15266  return VK_NOT_READY;
15267  }
15268 
15269  if(commandBuffer == VK_NULL_HANDLE)
15270  {
15271  maxGpuBytesToMove = 0;
15272  maxGpuAllocationsToMove = 0;
15273  }
15274 
15275  VkResult res = VK_SUCCESS;
15276 
15277  // Process default pools.
15278  for(uint32_t memTypeIndex = 0;
15279  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
15280  ++memTypeIndex)
15281  {
15282  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15283  if(pBlockVectorCtx)
15284  {
15285  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15286  pBlockVectorCtx->GetBlockVector()->Defragment(
15287  pBlockVectorCtx,
15288  pStats, flags,
15289  maxCpuBytesToMove, maxCpuAllocationsToMove,
15290  maxGpuBytesToMove, maxGpuAllocationsToMove,
15291  commandBuffer);
15292  if(pBlockVectorCtx->res != VK_SUCCESS)
15293  {
15294  res = pBlockVectorCtx->res;
15295  }
15296  }
15297  }
15298 
15299  // Process custom pools.
15300  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15301  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
15302  ++customCtxIndex)
15303  {
15304  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15305  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15306  pBlockVectorCtx->GetBlockVector()->Defragment(
15307  pBlockVectorCtx,
15308  pStats, flags,
15309  maxCpuBytesToMove, maxCpuAllocationsToMove,
15310  maxGpuBytesToMove, maxGpuAllocationsToMove,
15311  commandBuffer);
15312  if(pBlockVectorCtx->res != VK_SUCCESS)
15313  {
15314  res = pBlockVectorCtx->res;
15315  }
15316  }
15317 
15318  return res;
15319 }
15320 
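// Begins one incremental pass: every block vector that has no plan yet computes one
// (called with a null command buffer, so nothing is recorded for the GPU), then each
// hands out pending moves through ProcessDefragmentations() until pInfo->moveCount
// entries are filled; pInfo->moveCount is finally reduced to the number of moves
// actually returned.
//
// A rough sketch of how an application might drive these passes through the public API
// (illustrative; `allocator`, `defragCtx`, `moveArray`, and `maxMoves` are placeholder
// names and error handling is omitted):
//
//     VmaDefragmentationPassInfo passInfo = {};
//     VkResult res = VK_NOT_READY;
//     while(res == VK_NOT_READY)
//     {
//         passInfo.moveCount = maxMoves;
//         passInfo.pMoves = moveArray;
//         vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
//         // Recreate each resource at pMoves[i].memory + pMoves[i].offset and copy
//         // its data, then close the pass:
//         res = vmaEndDefragmentationPass(allocator, defragCtx);
//     }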
15321 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
15322 {
15323  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
15324  uint32_t movesLeft = pInfo->moveCount;
15325 
15326  // Process default pools.
15327  for(uint32_t memTypeIndex = 0;
15328  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15329  ++memTypeIndex)
15330  {
15331  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15332  if(pBlockVectorCtx)
15333  {
15334  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15335 
15336  if(!pBlockVectorCtx->hasDefragmentationPlan)
15337  {
15338  pBlockVectorCtx->GetBlockVector()->Defragment(
15339  pBlockVectorCtx,
15340  m_pStats, m_Flags,
15341  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15342  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15343  VK_NULL_HANDLE);
15344 
15345  if(pBlockVectorCtx->res < VK_SUCCESS)
15346  continue;
15347 
15348  pBlockVectorCtx->hasDefragmentationPlan = true;
15349  }
15350 
15351  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15352  pBlockVectorCtx,
15353  pCurrentMove, movesLeft);
15354 
15355  movesLeft -= processed;
15356  pCurrentMove += processed;
15357  }
15358  }
15359 
15360  // Process custom pools.
15361  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15362  customCtxIndex < customCtxCount;
15363  ++customCtxIndex)
15364  {
15365  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15366  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15367 
15368  if(!pBlockVectorCtx->hasDefragmentationPlan)
15369  {
15370  pBlockVectorCtx->GetBlockVector()->Defragment(
15371  pBlockVectorCtx,
15372  m_pStats, m_Flags,
15373  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15374  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15375  VK_NULL_HANDLE);
15376 
15377  if(pBlockVectorCtx->res < VK_SUCCESS)
15378  continue;
15379 
15380  pBlockVectorCtx->hasDefragmentationPlan = true;
15381  }
15382 
15383  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15384  pBlockVectorCtx,
15385  pCurrentMove, movesLeft);
15386 
15387  movesLeft -= processed;
15388  pCurrentMove += processed;
15389  }
15390 
15391  pInfo->moveCount = pInfo->moveCount - movesLeft;
15392 
15393  return VK_SUCCESS;
15394 }
15395 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
15396 {
15397  VkResult res = VK_SUCCESS;
15398 
15399  // Process default pools.
15400  for(uint32_t memTypeIndex = 0;
15401  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15402  ++memTypeIndex)
15403  {
15404  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15405  if(pBlockVectorCtx)
15406  {
15407  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15408 
15409  if(!pBlockVectorCtx->hasDefragmentationPlan)
15410  {
15411  res = VK_NOT_READY;
15412  continue;
15413  }
15414 
15415  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15416  pBlockVectorCtx, m_pStats);
15417 
15418  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15419  res = VK_NOT_READY;
15420  }
15421  }
15422 
15423  // Process custom pools.
15424  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15425  customCtxIndex < customCtxCount;
15426  ++customCtxIndex)
15427  {
15428  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15429  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15430 
15431  if(!pBlockVectorCtx->hasDefragmentationPlan)
15432  {
15433  res = VK_NOT_READY;
15434  continue;
15435  }
15436 
15437  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15438  pBlockVectorCtx, m_pStats);
15439 
15440  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15441  res = VK_NOT_READY;
15442  }
15443 
15444  return res;
15445 }
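// Usage sketch: one plausible way for an application to drive these two
// functions through the public API. The names `allocator` and `defragCtx`
// (obtained from vmaDefragmentationBegin) and the move handling are
// assumptions for illustration only.
//
//     VmaDefragmentationPassMoveInfo moves[64];
//     for(;;)
//     {
//         VmaDefragmentationPassInfo passInfo = {};
//         passInfo.moveCount = 64;
//         passInfo.pMoves = moves;
//         vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
//         // Copy the passInfo.moveCount allocations to their new blocks here,
//         // then commit the pass:
//         if(vmaEndDefragmentationPass(allocator, defragCtx) == VK_SUCCESS)
//             break; // All moves committed - defragmentation is complete.
//     }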
15446 
15447 ////////////////////////////////////////////////////////////////////////////////
15448 // VmaRecorder
15449 
15450 #if VMA_RECORDING_ENABLED
15451 
15452 VmaRecorder::VmaRecorder() :
15453  m_UseMutex(true),
15454  m_Flags(0),
15455  m_File(VMA_NULL),
15456  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
15457 {
15458 }
15459 
15460 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
15461 {
15462  m_UseMutex = useMutex;
15463  m_Flags = settings.flags;
15464 
15465 #if defined(_WIN32)
15466  // Open file for writing.
15467  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
15468 
15469  if(err != 0)
15470  {
15471  return VK_ERROR_INITIALIZATION_FAILED;
15472  }
15473 #else
15474  // Open file for writing.
15475  m_File = fopen(settings.pFilePath, "wb");
15476 
15477  if(m_File == VMA_NULL)
15478  {
15479  return VK_ERROR_INITIALIZATION_FAILED;
15480  }
15481 #endif
15482 
15483  // Write header.
15484  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
15485  fprintf(m_File, "%s\n", "1,8");
15486 
15487  return VK_SUCCESS;
15488 }
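// After this header, every recorded call is appended as one CSV line of the
// form "threadId,time,frameIndex,functionName[,arguments...]", for example
// (values illustrative): 12568,0.002,0,vmaCreateAllocator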
15489 
15490 VmaRecorder::~VmaRecorder()
15491 {
15492  if(m_File != VMA_NULL)
15493  {
15494  fclose(m_File);
15495  }
15496 }
15497 
15498 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
15499 {
15500  CallParams callParams;
15501  GetBasicParams(callParams);
15502 
15503  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15504  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
15505  Flush();
15506 }
15507 
15508 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
15509 {
15510  CallParams callParams;
15511  GetBasicParams(callParams);
15512 
15513  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15514  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
15515  Flush();
15516 }
15517 
15518 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
15519 {
15520  CallParams callParams;
15521  GetBasicParams(callParams);
15522 
15523  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15524  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
15525  createInfo.memoryTypeIndex,
15526  createInfo.flags,
15527  createInfo.blockSize,
15528  (uint64_t)createInfo.minBlockCount,
15529  (uint64_t)createInfo.maxBlockCount,
15530  createInfo.frameInUseCount,
15531  pool);
15532  Flush();
15533 }
15534 
15535 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
15536 {
15537  CallParams callParams;
15538  GetBasicParams(callParams);
15539 
15540  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15541  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
15542  pool);
15543  Flush();
15544 }
15545 
15546 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
15547  const VkMemoryRequirements& vkMemReq,
15548  const VmaAllocationCreateInfo& createInfo,
15549  VmaAllocation allocation)
15550 {
15551  CallParams callParams;
15552  GetBasicParams(callParams);
15553 
15554  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15555  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15556  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15557  vkMemReq.size,
15558  vkMemReq.alignment,
15559  vkMemReq.memoryTypeBits,
15560  createInfo.flags,
15561  createInfo.usage,
15562  createInfo.requiredFlags,
15563  createInfo.preferredFlags,
15564  createInfo.memoryTypeBits,
15565  createInfo.pool,
15566  allocation,
15567  userDataStr.GetString());
15568  Flush();
15569 }
15570 
15571 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
15572  const VkMemoryRequirements& vkMemReq,
15573  const VmaAllocationCreateInfo& createInfo,
15574  uint64_t allocationCount,
15575  const VmaAllocation* pAllocations)
15576 {
15577  CallParams callParams;
15578  GetBasicParams(callParams);
15579 
15580  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15581  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15582  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
15583  vkMemReq.size,
15584  vkMemReq.alignment,
15585  vkMemReq.memoryTypeBits,
15586  createInfo.flags,
15587  createInfo.usage,
15588  createInfo.requiredFlags,
15589  createInfo.preferredFlags,
15590  createInfo.memoryTypeBits,
15591  createInfo.pool);
15592  PrintPointerList(allocationCount, pAllocations);
15593  fprintf(m_File, ",%s\n", userDataStr.GetString());
15594  Flush();
15595 }
15596 
15597 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
15598  const VkMemoryRequirements& vkMemReq,
15599  bool requiresDedicatedAllocation,
15600  bool prefersDedicatedAllocation,
15601  const VmaAllocationCreateInfo& createInfo,
15602  VmaAllocation allocation)
15603 {
15604  CallParams callParams;
15605  GetBasicParams(callParams);
15606 
15607  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15608  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15609  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15610  vkMemReq.size,
15611  vkMemReq.alignment,
15612  vkMemReq.memoryTypeBits,
15613  requiresDedicatedAllocation ? 1 : 0,
15614  prefersDedicatedAllocation ? 1 : 0,
15615  createInfo.flags,
15616  createInfo.usage,
15617  createInfo.requiredFlags,
15618  createInfo.preferredFlags,
15619  createInfo.memoryTypeBits,
15620  createInfo.pool,
15621  allocation,
15622  userDataStr.GetString());
15623  Flush();
15624 }
15625 
15626 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15627  const VkMemoryRequirements& vkMemReq,
15628  bool requiresDedicatedAllocation,
15629  bool prefersDedicatedAllocation,
15630  const VmaAllocationCreateInfo& createInfo,
15631  VmaAllocation allocation)
15632 {
15633  CallParams callParams;
15634  GetBasicParams(callParams);
15635 
15636  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15637  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15638  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15639  vkMemReq.size,
15640  vkMemReq.alignment,
15641  vkMemReq.memoryTypeBits,
15642  requiresDedicatedAllocation ? 1 : 0,
15643  prefersDedicatedAllocation ? 1 : 0,
15644  createInfo.flags,
15645  createInfo.usage,
15646  createInfo.requiredFlags,
15647  createInfo.preferredFlags,
15648  createInfo.memoryTypeBits,
15649  createInfo.pool,
15650  allocation,
15651  userDataStr.GetString());
15652  Flush();
15653 }
15654 
15655 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15656  VmaAllocation allocation)
15657 {
15658  CallParams callParams;
15659  GetBasicParams(callParams);
15660 
15661  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15662  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15663  allocation);
15664  Flush();
15665 }
15666 
15667 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15668  uint64_t allocationCount,
15669  const VmaAllocation* pAllocations)
15670 {
15671  CallParams callParams;
15672  GetBasicParams(callParams);
15673 
15674  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15675  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15676  PrintPointerList(allocationCount, pAllocations);
15677  fprintf(m_File, "\n");
15678  Flush();
15679 }
15680 
15681 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15682  VmaAllocation allocation,
15683  const void* pUserData)
15684 {
15685  CallParams callParams;
15686  GetBasicParams(callParams);
15687 
15688  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15689  UserDataString userDataStr(
15690  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15691  pUserData);
15692  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15693  allocation,
15694  userDataStr.GetString());
15695  Flush();
15696 }
15697 
15698 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15699  VmaAllocation allocation)
15700 {
15701  CallParams callParams;
15702  GetBasicParams(callParams);
15703 
15704  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15705  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15706  allocation);
15707  Flush();
15708 }
15709 
15710 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15711  VmaAllocation allocation)
15712 {
15713  CallParams callParams;
15714  GetBasicParams(callParams);
15715 
15716  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15717  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15718  allocation);
15719  Flush();
15720 }
15721 
15722 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15723  VmaAllocation allocation)
15724 {
15725  CallParams callParams;
15726  GetBasicParams(callParams);
15727 
15728  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15729  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15730  allocation);
15731  Flush();
15732 }
15733 
15734 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15735  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15736 {
15737  CallParams callParams;
15738  GetBasicParams(callParams);
15739 
15740  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15741  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15742  allocation,
15743  offset,
15744  size);
15745  Flush();
15746 }
15747 
15748 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15749  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15750 {
15751  CallParams callParams;
15752  GetBasicParams(callParams);
15753 
15754  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15755  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15756  allocation,
15757  offset,
15758  size);
15759  Flush();
15760 }
15761 
15762 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15763  const VkBufferCreateInfo& bufCreateInfo,
15764  const VmaAllocationCreateInfo& allocCreateInfo,
15765  VmaAllocation allocation)
15766 {
15767  CallParams callParams;
15768  GetBasicParams(callParams);
15769 
15770  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15771  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15772  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15773  bufCreateInfo.flags,
15774  bufCreateInfo.size,
15775  bufCreateInfo.usage,
15776  bufCreateInfo.sharingMode,
15777  allocCreateInfo.flags,
15778  allocCreateInfo.usage,
15779  allocCreateInfo.requiredFlags,
15780  allocCreateInfo.preferredFlags,
15781  allocCreateInfo.memoryTypeBits,
15782  allocCreateInfo.pool,
15783  allocation,
15784  userDataStr.GetString());
15785  Flush();
15786 }
15787 
15788 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15789  const VkImageCreateInfo& imageCreateInfo,
15790  const VmaAllocationCreateInfo& allocCreateInfo,
15791  VmaAllocation allocation)
15792 {
15793  CallParams callParams;
15794  GetBasicParams(callParams);
15795 
15796  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15797  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15798  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15799  imageCreateInfo.flags,
15800  imageCreateInfo.imageType,
15801  imageCreateInfo.format,
15802  imageCreateInfo.extent.width,
15803  imageCreateInfo.extent.height,
15804  imageCreateInfo.extent.depth,
15805  imageCreateInfo.mipLevels,
15806  imageCreateInfo.arrayLayers,
15807  imageCreateInfo.samples,
15808  imageCreateInfo.tiling,
15809  imageCreateInfo.usage,
15810  imageCreateInfo.sharingMode,
15811  imageCreateInfo.initialLayout,
15812  allocCreateInfo.flags,
15813  allocCreateInfo.usage,
15814  allocCreateInfo.requiredFlags,
15815  allocCreateInfo.preferredFlags,
15816  allocCreateInfo.memoryTypeBits,
15817  allocCreateInfo.pool,
15818  allocation,
15819  userDataStr.GetString());
15820  Flush();
15821 }
15822 
15823 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15824  VmaAllocation allocation)
15825 {
15826  CallParams callParams;
15827  GetBasicParams(callParams);
15828 
15829  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15830  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15831  allocation);
15832  Flush();
15833 }
15834 
15835 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15836  VmaAllocation allocation)
15837 {
15838  CallParams callParams;
15839  GetBasicParams(callParams);
15840 
15841  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15842  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15843  allocation);
15844  Flush();
15845 }
15846 
15847 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15848  VmaAllocation allocation)
15849 {
15850  CallParams callParams;
15851  GetBasicParams(callParams);
15852 
15853  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15854  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15855  allocation);
15856  Flush();
15857 }
15858 
15859 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15860  VmaAllocation allocation)
15861 {
15862  CallParams callParams;
15863  GetBasicParams(callParams);
15864 
15865  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15866  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15867  allocation);
15868  Flush();
15869 }
15870 
15871 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15872  VmaPool pool)
15873 {
15874  CallParams callParams;
15875  GetBasicParams(callParams);
15876 
15877  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15878  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15879  pool);
15880  Flush();
15881 }
15882 
15883 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15884  const VmaDefragmentationInfo2& info,
15885  VmaDefragmentationContext ctx)
15886 {
15887  CallParams callParams;
15888  GetBasicParams(callParams);
15889 
15890  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15891  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15892  info.flags);
15893  PrintPointerList(info.allocationCount, info.pAllocations);
15894  fprintf(m_File, ",");
15895  PrintPointerList(info.poolCount, info.pPools);
15896  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15897  info.maxCpuBytesToMove,
15898  info.maxCpuAllocationsToMove,
15899  info.maxGpuBytesToMove,
15900  info.maxGpuAllocationsToMove,
15901  info.commandBuffer,
15902  ctx);
15903  Flush();
15904 }
15905 
15906 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15907  VmaDefragmentationContext ctx)
15908 {
15909  CallParams callParams;
15910  GetBasicParams(callParams);
15911 
15912  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15913  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15914  ctx);
15915  Flush();
15916 }
15917 
15918 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15919  VmaPool pool,
15920  const char* name)
15921 {
15922  CallParams callParams;
15923  GetBasicParams(callParams);
15924 
15925  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15926  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15927  pool, name != VMA_NULL ? name : "");
15928  Flush();
15929 }
15930 
15931 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15932 {
15933  if(pUserData != VMA_NULL)
15934  {
15935  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15936  {
15937  m_Str = (const char*)pUserData;
15938  }
15939  else
15940  {
15941  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, pUserData is an opaque pointer, so format its address as a string and store that.
15942  snprintf(m_PtrStr, 17, "%p", pUserData);
15943  m_Str = m_PtrStr;
15944  }
15945  }
15946  else
15947  {
15948  m_Str = "";
15949  }
15950 }
15951 
15952 void VmaRecorder::WriteConfiguration(
15953  const VkPhysicalDeviceProperties& devProps,
15954  const VkPhysicalDeviceMemoryProperties& memProps,
15955  uint32_t vulkanApiVersion,
15956  bool dedicatedAllocationExtensionEnabled,
15957  bool bindMemory2ExtensionEnabled,
15958  bool memoryBudgetExtensionEnabled,
15959  bool deviceCoherentMemoryExtensionEnabled)
15960 {
15961  fprintf(m_File, "Config,Begin\n");
15962 
15963  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15964 
15965  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15966  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15967  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15968  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15969  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15970  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15971 
15972  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15973  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15974  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15975 
15976  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15977  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15978  {
15979  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15980  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15981  }
15982  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15983  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15984  {
15985  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15986  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15987  }
15988 
15989  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15990  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15991  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15992  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15993 
15994  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15995  fprintf(m_File, "Macro,VMA_MIN_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_MIN_ALIGNMENT);
15996  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15997  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15998  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15999  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
16000  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
16001  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
16002  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
16003 
16004  fprintf(m_File, "Config,End\n");
16005 }
16006 
16007 void VmaRecorder::GetBasicParams(CallParams& outParams)
16008 {
16009  #if defined(_WIN32)
16010  outParams.threadId = GetCurrentThreadId();
16011  #else
16012  // Use C++11 features to get thread id and convert it to uint32_t.
16013  // There is room for optimization since sstream is quite slow.
16014  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
16015  std::thread::id thread_id = std::this_thread::get_id();
16016  std::stringstream thread_id_to_string_converter;
16017  thread_id_to_string_converter << thread_id;
16018  std::string thread_id_as_string = thread_id_to_string_converter.str();
16019  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string));
16020  #endif
16021 
16022  auto current_time = std::chrono::high_resolution_clock::now();
16023 
16024  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
16025 }
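// A lighter-weight alternative (sketch) to the stringstream round-trip above
// would be hashing the id, at the cost of possible collisions between ids:
//
//     outParams.threadId = static_cast<uint32_t>(
//         std::hash<std::thread::id>{}(std::this_thread::get_id()));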
16026 
16027 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
16028 {
16029  if(count)
16030  {
16031  fprintf(m_File, "%p", pItems[0]);
16032  for(uint64_t i = 1; i < count; ++i)
16033  {
16034  fprintf(m_File, " %p", pItems[i]);
16035  }
16036  }
16037 }
16038 
16039 void VmaRecorder::Flush()
16040 {
16041  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
16042  {
16043  fflush(m_File);
16044  }
16045 }
16046 
16047 #endif // #if VMA_RECORDING_ENABLED
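// Usage sketch: enabling the recorder, assuming this file was compiled with
// VMA_RECORDING_ENABLED defined to 1. The file path is illustrative.
//
//     VmaRecordSettings recordSettings = {};
//     recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
//     recordSettings.pFilePath = "vma_recording.csv";
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     // ...device, physicalDevice, instance...
//     allocatorInfo.pRecordSettings = &recordSettings;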
16048 
16049 ////////////////////////////////////////////////////////////////////////////////
16050 // VmaAllocationObjectAllocator
16051 
16052 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
16053  m_Allocator(pAllocationCallbacks, 1024)
16054 {
16055 }
16056 
16057 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
16058 {
16059  VmaMutexLock mutexLock(m_Mutex);
16060  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
16061 }
16062 
16063 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
16064 {
16065  VmaMutexLock mutexLock(m_Mutex);
16066  m_Allocator.Free(hAlloc);
16067 }
16068 
16069 ////////////////////////////////////////////////////////////////////////////////
16070 // VmaAllocator_T
16071 
16072 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
16073  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
16074  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
16075  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
16076  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
16077  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
16078  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
16079  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
16080  m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
16081  m_hDevice(pCreateInfo->device),
16082  m_hInstance(pCreateInfo->instance),
16083  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
16084  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
16085  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
16086  m_AllocationObjectAllocator(&m_AllocationCallbacks),
16087  m_HeapSizeLimitMask(0),
16088  m_DeviceMemoryCount(0),
16089  m_PreferredLargeHeapBlockSize(0),
16090  m_PhysicalDevice(pCreateInfo->physicalDevice),
16091  m_CurrentFrameIndex(0),
16092  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
16093  m_NextPoolId(0),
16094  m_GlobalMemoryTypeBits(UINT32_MAX)
16095 #if VMA_RECORDING_ENABLED
16096  ,m_pRecorder(VMA_NULL)
16097 #endif
16098 {
16099  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16100  {
16101  m_UseKhrDedicatedAllocation = false;
16102  m_UseKhrBindMemory2 = false;
16103  }
16104 
16105  if(VMA_DEBUG_DETECT_CORRUPTION)
16106  {
16107  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
16108  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
16109  }
16110 
16111  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
16112 
16113  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
16114  {
16115 #if !(VMA_DEDICATED_ALLOCATION)
16116  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
16117  {
16118  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
16119  }
16120 #endif
16121 #if !(VMA_BIND_MEMORY2)
16122  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
16123  {
16124  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
16125  }
16126 #endif
16127  }
16128 #if !(VMA_MEMORY_BUDGET)
16129  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
16130  {
16131  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
16132  }
16133 #endif
16134 #if !(VMA_BUFFER_DEVICE_ADDRESS)
16135  if(m_UseKhrBufferDeviceAddress)
16136  {
16137  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
16138  }
16139 #endif
16140 #if VMA_VULKAN_VERSION < 1002000
16141  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
16142  {
16143  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
16144  }
16145 #endif
16146 #if VMA_VULKAN_VERSION < 1001000
16147  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16148  {
16149  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
16150  }
16151 #endif
16152 #if !(VMA_MEMORY_PRIORITY)
16153  if(m_UseExtMemoryPriority)
16154  {
16155  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
16156  }
16157 #endif
16158 
16159  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
16160  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
16161  memset(&m_MemProps, 0, sizeof(m_MemProps));
16162 
16163  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
16164  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
16165 
16166 #if VMA_EXTERNAL_MEMORY
16167  memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
16168 #endif // #if VMA_EXTERNAL_MEMORY
16169 
16170  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
16171  {
16172  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
16173  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
16174  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
16175  }
16176 
16177  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
16178 
16179  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
16180  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
16181 
16182  VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
16183  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
16184  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
16185  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
16186 
16187  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
16188  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
16189 
16190  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
16191 
16192 #if VMA_EXTERNAL_MEMORY
16193  if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
16194  {
16195  memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
16196  sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
16197  }
16198 #endif // #if VMA_EXTERNAL_MEMORY
16199 
16200  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
16201  {
16202  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16203  {
16204  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
16205  if(limit != VK_WHOLE_SIZE)
16206  {
16207  m_HeapSizeLimitMask |= 1u << heapIndex;
16208  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
16209  {
16210  m_MemProps.memoryHeaps[heapIndex].size = limit;
16211  }
16212  }
16213  }
16214  }
16215 
16216  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16217  {
16218  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
16219 
16220  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
16221  this,
16222  VK_NULL_HANDLE, // hParentPool
16223  memTypeIndex,
16224  preferredBlockSize,
16225  0,
16226  SIZE_MAX,
16227  GetBufferImageGranularity(),
16228  pCreateInfo->frameInUseCount,
16229  false, // explicitBlockSize
16230  false, // linearAlgorithm
16231  0.5f, // priority (0.5 is the default per Vulkan spec)
16232  GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
16233  VMA_NULL); // pMemoryAllocateNext
16234  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
16235  // because minBlockCount is 0.
16236  }
16237 }
16238 
16239 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
16240 {
16241  VkResult res = VK_SUCCESS;
16242 
16243  if(pCreateInfo->pRecordSettings != VMA_NULL &&
16244  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
16245  {
16246 #if VMA_RECORDING_ENABLED
16247  m_pRecorder = vma_new(this, VmaRecorder)();
16248  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
16249  if(res != VK_SUCCESS)
16250  {
16251  return res;
16252  }
16253  m_pRecorder->WriteConfiguration(
16254  m_PhysicalDeviceProperties,
16255  m_MemProps,
16256  m_VulkanApiVersion,
16257  m_UseKhrDedicatedAllocation,
16258  m_UseKhrBindMemory2,
16259  m_UseExtMemoryBudget,
16260  m_UseAmdDeviceCoherentMemory);
16261  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
16262 #else
16263  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
16264  return VK_ERROR_FEATURE_NOT_PRESENT;
16265 #endif
16266  }
16267 
16268 #if VMA_MEMORY_BUDGET
16269  if(m_UseExtMemoryBudget)
16270  {
16271  UpdateVulkanBudget();
16272  }
16273 #endif // #if VMA_MEMORY_BUDGET
16274 
16275  return res;
16276 }
16277 
16278 VmaAllocator_T::~VmaAllocator_T()
16279 {
16280 #if VMA_RECORDING_ENABLED
16281  if(m_pRecorder != VMA_NULL)
16282  {
16283  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
16284  vma_delete(this, m_pRecorder);
16285  }
16286 #endif
16287 
16288  VMA_ASSERT(m_Pools.IsEmpty());
16289 
16290  for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
16291  {
16292  if(!m_DedicatedAllocations[memTypeIndex].IsEmpty())
16293  {
16294  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
16295  }
16296 
16297  vma_delete(this, m_pBlockVectors[memTypeIndex]);
16298  }
16299 }
16300 
16301 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
16302 {
16303 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16304  ImportVulkanFunctions_Static();
16305 #endif
16306 
16307  if(pVulkanFunctions != VMA_NULL)
16308  {
16309  ImportVulkanFunctions_Custom(pVulkanFunctions);
16310  }
16311 
16312 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16313  ImportVulkanFunctions_Dynamic();
16314 #endif
16315 
16316  ValidateVulkanFunctions();
16317 }
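// Usage sketch: an application built with VK_NO_PROTOTYPES can pass its own
// partially filled table; members left VMA_NULL are then picked up by the
// static or dynamic import paths above. `instance` and `device` are assumed
// to be valid handles.
//
//     VmaVulkanFunctions funcs = {};
//     funcs.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)
//         vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties");
//     funcs.vkAllocateMemory = (PFN_vkAllocateMemory)
//         vkGetDeviceProcAddr(device, "vkAllocateMemory");
//     // ...remaining members as needed...
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.pVulkanFunctions = &funcs;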
16318 
16319 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16320 
16321 void VmaAllocator_T::ImportVulkanFunctions_Static()
16322 {
16323  // Vulkan 1.0
16324  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
16325  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
16326  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
16327  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
16328  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
16329  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
16330  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
16331  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
16332  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
16333  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
16334  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
16335  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
16336  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
16337  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
16338  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
16339  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
16340  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
16341 
16342  // Vulkan 1.1
16343 #if VMA_VULKAN_VERSION >= 1001000
16344  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16345  {
16346  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
16347  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
16348  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
16349  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
16350  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
16351  }
16352 #endif
16353 }
16354 
16355 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16356 
16357 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
16358 {
16359  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
16360 
16361 #define VMA_COPY_IF_NOT_NULL(funcName) \
16362  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
16363 
16364  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
16365  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
16366  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
16367  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
16368  VMA_COPY_IF_NOT_NULL(vkMapMemory);
16369  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
16370  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
16371  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
16372  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
16373  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
16374  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
16375  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
16376  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
16377  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
16378  VMA_COPY_IF_NOT_NULL(vkCreateImage);
16379  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
16380  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
16381 
16382 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16383  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
16384  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
16385 #endif
16386 
16387 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16388  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
16389  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
16390 #endif
16391 
16392 #if VMA_MEMORY_BUDGET
16393  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
16394 #endif
16395 
16396 #undef VMA_COPY_IF_NOT_NULL
16397 }
16398 
16399 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16400 
16401 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
16402 {
16403 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
16404  if(m_VulkanFunctions.memberName == VMA_NULL) \
16405  m_VulkanFunctions.memberName = \
16406  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
16407 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
16408  if(m_VulkanFunctions.memberName == VMA_NULL) \
16409  m_VulkanFunctions.memberName = \
16410  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
16411 
16412  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
16413  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
16414  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
16415  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
16416  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
16417  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
16418  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
16419  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
16420  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
16421  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
16422  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
16423  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
16424  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
16425  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
16426  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
16427  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
16428  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
16429 
16430 #if VMA_VULKAN_VERSION >= 1001000
16431  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16432  {
16433  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
16434  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
16435  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
16436  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
16437  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
16438  }
16439 #endif
16440 
16441 #if VMA_DEDICATED_ALLOCATION
16442  if(m_UseKhrDedicatedAllocation)
16443  {
16444  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
16445  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
16446  }
16447 #endif
16448 
16449 #if VMA_BIND_MEMORY2
16450  if(m_UseKhrBindMemory2)
16451  {
16452  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
16453  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
16454  }
16455 #endif // #if VMA_BIND_MEMORY2
16456 
16457 #if VMA_MEMORY_BUDGET
16458  if(m_UseExtMemoryBudget)
16459  {
16460  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
16461  }
16462 #endif // #if VMA_MEMORY_BUDGET
16463 
16464 #undef VMA_FETCH_DEVICE_FUNC
16465 #undef VMA_FETCH_INSTANCE_FUNC
16466 }
16467 
16468 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16469 
16470 void VmaAllocator_T::ValidateVulkanFunctions()
16471 {
16472  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
16473  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
16474  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
16475  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
16476  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
16477  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
16478  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
16479  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
16480  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
16481  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
16482  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
16483  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
16484  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
16485  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
16486  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
16487  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
16488  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
16489 
16490 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16491  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
16492  {
16493  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
16494  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
16495  }
16496 #endif
16497 
16498 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16499  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
16500  {
16501  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
16502  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
16503  }
16504 #endif
16505 
16506 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
16507  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16508  {
16509  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
16510  }
16511 #endif
16512 }
16513 
16514 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
16515 {
16516  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16517  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16518  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
16519  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
16520 }
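// Worked example with the default macros (VMA_SMALL_HEAP_MAX_SIZE = 1 GiB,
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE = 256 MiB):
// - An 8 GiB device-local heap is not "small", so the preferred block size is 256 MiB.
// - A 512 MiB heap is "small", so the preferred block size is 512 MiB / 8 = 64 MiB.
// The final VmaAlignUp to 32 bytes leaves both values unchanged.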
16521 
16522 VkResult VmaAllocator_T::AllocateMemoryOfType(
16523  VkDeviceSize size,
16524  VkDeviceSize alignment,
16525  bool dedicatedAllocation,
16526  VkBuffer dedicatedBuffer,
16527  VkBufferUsageFlags dedicatedBufferUsage,
16528  VkImage dedicatedImage,
16529  const VmaAllocationCreateInfo& createInfo,
16530  uint32_t memTypeIndex,
16531  VmaSuballocationType suballocType,
16532  size_t allocationCount,
16533  VmaAllocation* pAllocations)
16534 {
16535  VMA_ASSERT(pAllocations != VMA_NULL);
16536  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
16537 
16538  VmaAllocationCreateInfo finalCreateInfo = createInfo;
16539 
16540  // If memory type is not HOST_VISIBLE, disable MAPPED.
16541  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16542  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16543  {
16544  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16545  }
16546  // If memory is lazily allocated, it should always be dedicated.
16547  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
16548  {
16549  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16550  }
16551 
16552  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
16553  VMA_ASSERT(blockVector);
16554 
16555  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
16556  bool preferDedicatedMemory =
16557  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
16558  dedicatedAllocation ||
16559  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
16560  size > preferredBlockSize / 2;
16561 
16562  if(preferDedicatedMemory &&
16563  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
16564  finalCreateInfo.pool == VK_NULL_HANDLE)
16565  {
16566  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16567  }
16568 
16569  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
16570  {
16571  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16572  {
16573  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16574  }
16575  else
16576  {
16577  return AllocateDedicatedMemory(
16578  size,
16579  suballocType,
16580  memTypeIndex,
16581  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16582  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16583  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16584  finalCreateInfo.pUserData,
16585  finalCreateInfo.priority,
16586  dedicatedBuffer,
16587  dedicatedBufferUsage,
16588  dedicatedImage,
16589  allocationCount,
16590  pAllocations);
16591  }
16592  }
16593  else
16594  {
16595  VkResult res = blockVector->Allocate(
16596  m_CurrentFrameIndex.load(),
16597  size,
16598  alignment,
16599  finalCreateInfo,
16600  suballocType,
16601  allocationCount,
16602  pAllocations);
16603  if(res == VK_SUCCESS)
16604  {
16605  return res;
16606  }
16607 
16608  // Try dedicated memory.
16609  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16610  {
16611  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16612  }
16613 
16614  // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
16615  // which can quickly deplete maxMemoryAllocationCount: Don't try dedicated allocations when above
16616  // 3/4 of the maximum allocation count.
16617  if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
16618  {
16619  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16620  }
16621 
16622  res = AllocateDedicatedMemory(
16623  size,
16624  suballocType,
16625  memTypeIndex,
16626  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16627  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16628  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16629  finalCreateInfo.pUserData,
16630  finalCreateInfo.priority,
16631  dedicatedBuffer,
16632  dedicatedBufferUsage,
16633  dedicatedImage,
16634  allocationCount,
16635  pAllocations);
16636  if(res == VK_SUCCESS)
16637  {
16638  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
16639  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
16640  return VK_SUCCESS;
16641  }
16642  else
16643  {
16644  // Everything failed: Return error code.
16645  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16646  return res;
16647  }
16648  }
16649 }
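// Usage sketch: how the heuristics above look from the application side. An
// allocation larger than half the preferred block size typically becomes
// dedicated on its own; the flag below merely forces that behavior. Sizes and
// handles are illustrative.
//
//     VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufInfo.size = 512ull * 1024 * 1024;
//     bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
//
//     VmaAllocationCreateInfo allocInfo = {};
//     allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     allocInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // optional
//
//     VkBuffer buf; VmaAllocation alloc;
//     vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buf, &alloc, VMA_NULL);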
16650 
16651 VkResult VmaAllocator_T::AllocateDedicatedMemory(
16652  VkDeviceSize size,
16653  VmaSuballocationType suballocType,
16654  uint32_t memTypeIndex,
16655  bool withinBudget,
16656  bool map,
16657  bool isUserDataString,
16658  void* pUserData,
16659  float priority,
16660  VkBuffer dedicatedBuffer,
16661  VkBufferUsageFlags dedicatedBufferUsage,
16662  VkImage dedicatedImage,
16663  size_t allocationCount,
16664  VmaAllocation* pAllocations)
16665 {
16666  VMA_ASSERT(allocationCount > 0 && pAllocations);
16667 
16668  if(withinBudget)
16669  {
16670  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16671  VmaBudget heapBudget = {};
16672  GetBudget(&heapBudget, heapIndex, 1);
16673  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16674  {
16675  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16676  }
16677  }
16678 
16679  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16680  allocInfo.memoryTypeIndex = memTypeIndex;
16681  allocInfo.allocationSize = size;
16682 
16683 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16684  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16685  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16686  {
16687  if(dedicatedBuffer != VK_NULL_HANDLE)
16688  {
16689  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16690  dedicatedAllocInfo.buffer = dedicatedBuffer;
16691  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16692  }
16693  else if(dedicatedImage != VK_NULL_HANDLE)
16694  {
16695  dedicatedAllocInfo.image = dedicatedImage;
16696  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16697  }
16698  }
16699 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16700 
16701 #if VMA_BUFFER_DEVICE_ADDRESS
16702  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16703  if(m_UseKhrBufferDeviceAddress)
16704  {
16705  bool canContainBufferWithDeviceAddress = true;
16706  if(dedicatedBuffer != VK_NULL_HANDLE)
16707  {
16708  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16709  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16710  }
16711  else if(dedicatedImage != VK_NULL_HANDLE)
16712  {
16713  canContainBufferWithDeviceAddress = false;
16714  }
16715  if(canContainBufferWithDeviceAddress)
16716  {
16717  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16718  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16719  }
16720  }
16721 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16722 
16723 #if VMA_MEMORY_PRIORITY
16724  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
16725  if(m_UseExtMemoryPriority)
16726  {
16727  priorityInfo.priority = priority;
16728  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
16729  }
16730 #endif // #if VMA_MEMORY_PRIORITY
16731 
16732 #if VMA_EXTERNAL_MEMORY
16733  // Attach VkExportMemoryAllocateInfoKHR if necessary.
16734  VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
16735  exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
16736  if(exportMemoryAllocInfo.handleTypes != 0)
16737  {
16738  VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
16739  }
16740 #endif // #if VMA_EXTERNAL_MEMORY
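// Note: VmaPnextChainPushFront() prepends, so if every optional feature above
// is enabled the final chain reads allocInfo.pNext -> exportMemoryAllocInfo ->
// priorityInfo -> allocFlagsInfo -> dedicatedAllocInfo; structures whose
// feature is disabled are simply skipped. Vulkan imposes no particular order
// on these structures.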
16741 
16742  size_t allocIndex;
16743  VkResult res = VK_SUCCESS;
16744  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16745  {
16746  res = AllocateDedicatedMemoryPage(
16747  size,
16748  suballocType,
16749  memTypeIndex,
16750  allocInfo,
16751  map,
16752  isUserDataString,
16753  pUserData,
16754  pAllocations + allocIndex);
16755  if(res != VK_SUCCESS)
16756  {
16757  break;
16758  }
16759  }
16760 
16761  if(res == VK_SUCCESS)
16762  {
16763  // Register them in m_DedicatedAllocations.
16764  {
16765  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16766  DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
16767  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16768  {
16769  dedicatedAllocations.PushBack(pAllocations[allocIndex]);
16770  }
16771  }
16772 
16773  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16774  }
16775  else
16776  {
16777  // Free all already created allocations.
16778  while(allocIndex--)
16779  {
16780  VmaAllocation currAlloc = pAllocations[allocIndex];
16781  VkDeviceMemory hMemory = currAlloc->GetMemory();
16782 
16783  /*
16784  There is no need to unmap here, because the Vulkan spec allows skipping
16785  vkUnmapMemory before vkFreeMemory.
16786 
16787  if(currAlloc->GetMappedData() != VMA_NULL)
16788  {
16789  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16790  }
16791  */
16792 
16793  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16794  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16795  currAlloc->SetUserData(this, VMA_NULL);
16796  m_AllocationObjectAllocator.Free(currAlloc);
16797  }
16798 
16799  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16800  }
16801 
16802  return res;
16803 }
16804 
16805 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16806  VkDeviceSize size,
16807  VmaSuballocationType suballocType,
16808  uint32_t memTypeIndex,
16809  const VkMemoryAllocateInfo& allocInfo,
16810  bool map,
16811  bool isUserDataString,
16812  void* pUserData,
16813  VmaAllocation* pAllocation)
16814 {
16815  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16816  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16817  if(res < 0)
16818  {
16819  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16820  return res;
16821  }
16822 
16823  void* pMappedData = VMA_NULL;
16824  if(map)
16825  {
16826  res = (*m_VulkanFunctions.vkMapMemory)(
16827  m_hDevice,
16828  hMemory,
16829  0,
16830  VK_WHOLE_SIZE,
16831  0,
16832  &pMappedData);
16833  if(res < 0)
16834  {
16835  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16836  FreeVulkanMemory(memTypeIndex, size, hMemory);
16837  return res;
16838  }
16839  }
16840 
16841  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16842  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16843  (*pAllocation)->SetUserData(this, pUserData);
16844  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16845  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16846  {
16847  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16848  }
16849 
16850  return VK_SUCCESS;
16851 }
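// ---------------------------------------------------------------------------
// [Editorial example - not part of the original source.] A minimal sketch of
// how the dedicated-allocation path above is reached through the public API.
// It assumes an initialized VmaAllocator and a filled VkBufferCreateInfo; the
// helper name is hypothetical.
static VkResult ExampleCreateDedicatedBuffer(
    VmaAllocator allocator,
    const VkBufferCreateInfo* pBufCreateInfo,
    VkBuffer* pBuffer,
    VmaAllocation* pAllocation)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    // Forces a separate VkDeviceMemory object for this buffer, so the request
    // ends up in AllocateDedicatedMemoryPage() above.
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    return vmaCreateBuffer(allocator, pBufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL);
}
// ---------------------------------------------------------------------------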
16852 
16853 void VmaAllocator_T::GetBufferMemoryRequirements(
16854  VkBuffer hBuffer,
16855  VkMemoryRequirements& memReq,
16856  bool& requiresDedicatedAllocation,
16857  bool& prefersDedicatedAllocation) const
16858 {
16859 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16860  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16861  {
16862  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16863  memReqInfo.buffer = hBuffer;
16864 
16865  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16866 
16867  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16868  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16869 
16870  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16871 
16872  memReq = memReq2.memoryRequirements;
16873  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16874  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16875  }
16876  else
16877 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16878  {
16879  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16880  requiresDedicatedAllocation = false;
16881  prefersDedicatedAllocation = false;
16882  }
16883 }
16884 
16885 void VmaAllocator_T::GetImageMemoryRequirements(
16886  VkImage hImage,
16887  VkMemoryRequirements& memReq,
16888  bool& requiresDedicatedAllocation,
16889  bool& prefersDedicatedAllocation) const
16890 {
16891 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16892  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16893  {
16894  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16895  memReqInfo.image = hImage;
16896 
16897  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16898 
16899  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16900  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16901 
16902  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16903 
16904  memReq = memReq2.memoryRequirements;
16905  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16906  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16907  }
16908  else
16909 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16910  {
16911  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16912  requiresDedicatedAllocation = false;
16913  prefersDedicatedAllocation = false;
16914  }
16915 }
16916 
16917 VkResult VmaAllocator_T::AllocateMemory(
16918  const VkMemoryRequirements& vkMemReq,
16919  bool requiresDedicatedAllocation,
16920  bool prefersDedicatedAllocation,
16921  VkBuffer dedicatedBuffer,
16922  VkBufferUsageFlags dedicatedBufferUsage,
16923  VkImage dedicatedImage,
16924  const VmaAllocationCreateInfo& createInfo,
16925  VmaSuballocationType suballocType,
16926  size_t allocationCount,
16927  VmaAllocation* pAllocations)
16928 {
16929  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16930 
16931  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16932 
16933  if(vkMemReq.size == 0)
16934  {
16935  return VK_ERROR_VALIDATION_FAILED_EXT;
16936  }
16937  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16938  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16939  {
16940  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16941  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16942  }
16943  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16944  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16945  {
16946  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16947  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16948  }
16949  if(requiresDedicatedAllocation)
16950  {
16951  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16952  {
16953  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16954  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16955  }
16956  if(createInfo.pool != VK_NULL_HANDLE)
16957  {
16958  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16959  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16960  }
16961  }
16962  if((createInfo.pool != VK_NULL_HANDLE) &&
16963  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16964  {
16965  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16966  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16967  }
16968 
16969  if(createInfo.pool != VK_NULL_HANDLE)
16970  {
16971  VmaAllocationCreateInfo createInfoForPool = createInfo;
16972  // If memory type is not HOST_VISIBLE, disable MAPPED.
16973  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16974  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16975  {
16976  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16977  }
16978 
16979  return createInfo.pool->m_BlockVector.Allocate(
16980  m_CurrentFrameIndex.load(),
16981  vkMemReq.size,
16982  vkMemReq.alignment,
16983  createInfoForPool,
16984  suballocType,
16985  allocationCount,
16986  pAllocations);
16987  }
16988  else
16989  {
16990  // Bit mask of Vulkan memory types acceptable for this allocation.
16991  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16992  uint32_t memTypeIndex = UINT32_MAX;
16993  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16994  if(res == VK_SUCCESS)
16995  {
16996  res = AllocateMemoryOfType(
16997  vkMemReq.size,
16998  vkMemReq.alignment,
16999  requiresDedicatedAllocation || prefersDedicatedAllocation,
17000  dedicatedBuffer,
17001  dedicatedBufferUsage,
17002  dedicatedImage,
17003  createInfo,
17004  memTypeIndex,
17005  suballocType,
17006  allocationCount,
17007  pAllocations);
17008  // Succeeded on first try.
17009  if(res == VK_SUCCESS)
17010  {
17011  return res;
17012  }
17013  // Allocation from this memory type failed. Try other compatible memory types.
17014  else
17015  {
17016  for(;;)
17017  {
17018  // Remove old memTypeIndex from list of possibilities.
17019  memoryTypeBits &= ~(1u << memTypeIndex);
17020  // Find alternative memTypeIndex.
17021  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
17022  if(res == VK_SUCCESS)
17023  {
17024  res = AllocateMemoryOfType(
17025  vkMemReq.size,
17026  vkMemReq.alignment,
17027  requiresDedicatedAllocation || prefersDedicatedAllocation,
17028  dedicatedBuffer,
17029  dedicatedBufferUsage,
17030  dedicatedImage,
17031  createInfo,
17032  memTypeIndex,
17033  suballocType,
17034  allocationCount,
17035  pAllocations);
17036  // Allocation from this alternative memory type succeeded.
17037  if(res == VK_SUCCESS)
17038  {
17039  return res;
17040  }
17041  // else: Allocation from this memory type failed. Try next one - next loop iteration.
17042  }
17043  // No other matching memory type index could be found.
17044  else
17045  {
17046  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
17047  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17048  }
17049  }
17050  }
17051  }
17052  // Can't find any single memory type matching the requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
17053  else
17054  return res;
17055  }
17056 }
17057 
17058 void VmaAllocator_T::FreeMemory(
17059  size_t allocationCount,
17060  const VmaAllocation* pAllocations)
17061 {
17062  VMA_ASSERT(pAllocations);
17063 
17064  for(size_t allocIndex = allocationCount; allocIndex--; )
17065  {
17066  VmaAllocation allocation = pAllocations[allocIndex];
17067 
17068  if(allocation != VK_NULL_HANDLE)
17069  {
17070  if(TouchAllocation(allocation))
17071  {
17072  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
17073  {
17074  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
17075  }
17076 
17077  switch(allocation->GetType())
17078  {
17079  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17080  {
17081  VmaBlockVector* pBlockVector = VMA_NULL;
17082  VmaPool hPool = allocation->GetBlock()->GetParentPool();
17083  if(hPool != VK_NULL_HANDLE)
17084  {
17085  pBlockVector = &hPool->m_BlockVector;
17086  }
17087  else
17088  {
17089  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17090  pBlockVector = m_pBlockVectors[memTypeIndex];
17091  }
17092  pBlockVector->Free(allocation);
17093  }
17094  break;
17095  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17096  FreeDedicatedMemory(allocation);
17097  break;
17098  default:
17099  VMA_ASSERT(0);
17100  }
17101  }
17102 
17103  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
17104  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
17105  allocation->SetUserData(this, VMA_NULL);
17106  m_AllocationObjectAllocator.Free(allocation);
17107  }
17108  }
17109 }
17110 
17111 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
17112 {
17113  // Initialize.
17114  InitStatInfo(pStats->total);
17115  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
17116  InitStatInfo(pStats->memoryType[i]);
17117  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
17118  InitStatInfo(pStats->memoryHeap[i]);
17119 
17120  // Process default pools.
17121  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17122  {
17123  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17124  VMA_ASSERT(pBlockVector);
17125  pBlockVector->AddStats(pStats);
17126  }
17127 
17128  // Process custom pools.
17129  {
17130  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17131  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
17132  {
17133  pool->m_BlockVector.AddStats(pStats);
17134  }
17135  }
17136 
17137  // Process dedicated allocations.
17138  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17139  {
17140  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
17141  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17142  DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
17143  for(VmaAllocation alloc = dedicatedAllocList.Front();
17144  alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
17145  {
17146  VmaStatInfo allocationStatInfo;
17147  alloc->DedicatedAllocCalcStatsInfo(allocationStatInfo);
17148  VmaAddStatInfo(pStats->total, allocationStatInfo);
17149  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
17150  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
17151  }
17152  }
17153 
17154  // Postprocess.
17155  VmaPostprocessCalcStatInfo(pStats->total);
17156  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
17157  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
17158  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
17159  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
17160 }
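// ---------------------------------------------------------------------------
// [Editorial example - not part of the original source.] A sketch of reading
// the aggregated statistics computed above through the public wrapper
// vmaCalculateStats(); assumes an initialized VmaAllocator and <cstdio>.
static void ExampleLogTotalUsage(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // stats.total aggregates the default pools, custom pools and dedicated
    // allocations, exactly as assembled by CalculateStats() above.
    printf("Used: %llu B, unused: %llu B, allocations: %u\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes,
        stats.total.allocationCount);
}
// ---------------------------------------------------------------------------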
17161 
17162 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
17163 {
17164 #if VMA_MEMORY_BUDGET
17165  if(m_UseExtMemoryBudget)
17166  {
17167  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
17168  {
17169  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
17170  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
17171  {
17172  const uint32_t heapIndex = firstHeap + i;
17173 
17174  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
17175  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
17176 
17177  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
17178  {
17179  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
17180  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17181  }
17182  else
17183  {
17184  outBudget->usage = 0;
17185  }
17186 
17187  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
17188  outBudget->budget = VMA_MIN(
17189  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
17190  }
17191  }
17192  else
17193  {
17194  UpdateVulkanBudget(); // Outside of mutex lock
17195  GetBudget(outBudget, firstHeap, heapCount); // Recursion
17196  }
17197  }
17198  else
17199 #endif
17200  {
17201  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
17202  {
17203  const uint32_t heapIndex = firstHeap + i;
17204 
17205  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
17206  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
17207 
17208  outBudget->usage = outBudget->blockBytes;
17209  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17210  }
17211  }
17212 }
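// ---------------------------------------------------------------------------
// [Editorial example - not part of the original source.] A sketch showing how
// an application might consume the per-heap budget computed above, via the
// public wrapper vmaGetBudget(); the helper name is hypothetical and
// heapIndex is assumed to be less than the heap count.
static bool ExampleHasHeadroom(VmaAllocator allocator, uint32_t heapIndex, VkDeviceSize bytesNeeded)
{
    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets); // fills one VmaBudget per memory heap
    return budgets[heapIndex].usage + bytesNeeded <= budgets[heapIndex].budget;
}
// ---------------------------------------------------------------------------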
17213 
17214 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
17215 
17216 VkResult VmaAllocator_T::DefragmentationBegin(
17217  const VmaDefragmentationInfo2& info,
17218  VmaDefragmentationStats* pStats,
17219  VmaDefragmentationContext* pContext)
17220 {
17221  if(info.pAllocationsChanged != VMA_NULL)
17222  {
17223  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
17224  }
17225 
17226  *pContext = vma_new(this, VmaDefragmentationContext_T)(
17227  this, m_CurrentFrameIndex.load(), info.flags, pStats);
17228 
17229  (*pContext)->AddPools(info.poolCount, info.pPools);
17230  (*pContext)->AddAllocations(
17231  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
17232 
17233  VkResult res = (*pContext)->Defragment(
17234  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
17235  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
17236  info.commandBuffer, pStats, info.flags);
17237 
17238  if(res != VK_NOT_READY)
17239  {
17240  vma_delete(this, *pContext);
17241  *pContext = VMA_NULL;
17242  }
17243 
17244  return res;
17245 }
17246 
17247 VkResult VmaAllocator_T::DefragmentationEnd(
17248  VmaDefragmentationContext context)
17249 {
17250  vma_delete(this, context);
17251  return VK_SUCCESS;
17252 }
17253 
17254 VkResult VmaAllocator_T::DefragmentationPassBegin(
17255  VmaDefragmentationPassInfo* pInfo,
17256  VmaDefragmentationContext context)
17257 {
17258  return context->DefragmentPassBegin(pInfo);
17259 }
17260 VkResult VmaAllocator_T::DefragmentationPassEnd(
17261  VmaDefragmentationContext context)
17262 {
17263  return context->DefragmentPassEnd();
17264 
17265 }
17266 
17267 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
17268 {
17269  if(hAllocation->CanBecomeLost())
17270  {
17271  /*
17272  Warning: This is a carefully designed algorithm.
17273  Do not modify unless you really know what you're doing :)
17274  */
17275  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17276  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17277  for(;;)
17278  {
17279  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
17280  {
17281  pAllocationInfo->memoryType = UINT32_MAX;
17282  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
17283  pAllocationInfo->offset = 0;
17284  pAllocationInfo->size = hAllocation->GetSize();
17285  pAllocationInfo->pMappedData = VMA_NULL;
17286  pAllocationInfo->pUserData = hAllocation->GetUserData();
17287  return;
17288  }
17289  else if(localLastUseFrameIndex == localCurrFrameIndex)
17290  {
17291  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
17292  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
17293  pAllocationInfo->offset = hAllocation->GetOffset();
17294  pAllocationInfo->size = hAllocation->GetSize();
17295  pAllocationInfo->pMappedData = VMA_NULL;
17296  pAllocationInfo->pUserData = hAllocation->GetUserData();
17297  return;
17298  }
17299  else // Last use time earlier than current time.
17300  {
17301  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17302  {
17303  localLastUseFrameIndex = localCurrFrameIndex;
17304  }
17305  }
17306  }
17307  }
17308  else
17309  {
17310 #if VMA_STATS_STRING_ENABLED
17311  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17312  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17313  for(;;)
17314  {
17315  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17316  if(localLastUseFrameIndex == localCurrFrameIndex)
17317  {
17318  break;
17319  }
17320  else // Last use time earlier than current time.
17321  {
17322  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17323  {
17324  localLastUseFrameIndex = localCurrFrameIndex;
17325  }
17326  }
17327  }
17328 #endif
17329 
17330  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
17331  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
17332  pAllocationInfo->offset = hAllocation->GetOffset();
17333  pAllocationInfo->size = hAllocation->GetSize();
17334  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
17335  pAllocationInfo->pUserData = hAllocation->GetUserData();
17336  }
17337 }
17338 
17339 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
17340 {
17341  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
17342  if(hAllocation->CanBecomeLost())
17343  {
17344  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17345  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17346  for(;;)
17347  {
17348  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
17349  {
17350  return false;
17351  }
17352  else if(localLastUseFrameIndex == localCurrFrameIndex)
17353  {
17354  return true;
17355  }
17356  else // Last use time earlier than current time.
17357  {
17358  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17359  {
17360  localLastUseFrameIndex = localCurrFrameIndex;
17361  }
17362  }
17363  }
17364  }
17365  else
17366  {
17367 #if VMA_STATS_STRING_ENABLED
17368  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17369  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17370  for(;;)
17371  {
17372  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17373  if(localLastUseFrameIndex == localCurrFrameIndex)
17374  {
17375  break;
17376  }
17377  else // Last use time earlier than current time.
17378  {
17379  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17380  {
17381  localLastUseFrameIndex = localCurrFrameIndex;
17382  }
17383  }
17384  }
17385 #endif
17386 
17387  return true;
17388  }
17389 }
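// ---------------------------------------------------------------------------
// [Editorial example - not part of the original source.] A sketch of the
// lost-allocation check exposed publicly as vmaTouchAllocation(), which
// routes into TouchAllocation() above; assumes an allocation created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
static bool ExampleEnsureNotLost(VmaAllocator allocator, VmaAllocation allocation)
{
    // Returns VK_TRUE and bumps the last-use frame index if the allocation is
    // still alive; VK_FALSE means it was lost and must be recreated.
    return vmaTouchAllocation(allocator, allocation) == VK_TRUE;
}
// ---------------------------------------------------------------------------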
17390 
17391 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
17392 {
17393  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
17394 
17395  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
17396 
17397  // Protection against an uninitialized new structure member. If garbage data is left there, dereferencing this pointer would crash.
17398  if(pCreateInfo->pMemoryAllocateNext)
17399  {
17400  VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
17401  }
17402 
17403  if(newCreateInfo.maxBlockCount == 0)
17404  {
17405  newCreateInfo.maxBlockCount = SIZE_MAX;
17406  }
17407  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
17408  {
17409  return VK_ERROR_INITIALIZATION_FAILED;
17410  }
17411  // Memory type index out of range or forbidden.
17412  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
17413  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
17414  {
17415  return VK_ERROR_FEATURE_NOT_PRESENT;
17416  }
17417  if(newCreateInfo.minAllocationAlignment > 0)
17418  {
17419  VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
17420  }
17421 
17422  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
17423 
17424  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
17425 
17426  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
17427  if(res != VK_SUCCESS)
17428  {
17429  vma_delete(this, *pPool);
17430  *pPool = VMA_NULL;
17431  return res;
17432  }
17433 
17434  // Add to m_Pools.
17435  {
17436  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17437  (*pPool)->SetId(m_NextPoolId++);
17438  m_Pools.PushBack(*pPool);
17439  }
17440 
17441  return VK_SUCCESS;
17442 }
17443 
17444 void VmaAllocator_T::DestroyPool(VmaPool pool)
17445 {
17446  // Remove from m_Pools.
17447  {
17448  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17449  m_Pools.Remove(pool);
17450  }
17451 
17452  vma_delete(this, pool);
17453 }
17454 
17455 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
17456 {
17457  pool->m_BlockVector.GetPoolStats(pPoolStats);
17458 }
17459 
17460 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
17461 {
17462  m_CurrentFrameIndex.store(frameIndex);
17463 
17464 #if VMA_MEMORY_BUDGET
17465  if(m_UseExtMemoryBudget)
17466  {
17467  UpdateVulkanBudget();
17468  }
17469 #endif // #if VMA_MEMORY_BUDGET
17470 }
17471 
17472 void VmaAllocator_T::MakePoolAllocationsLost(
17473  VmaPool hPool,
17474  size_t* pLostAllocationCount)
17475 {
17476  hPool->m_BlockVector.MakePoolAllocationsLost(
17477  m_CurrentFrameIndex.load(),
17478  pLostAllocationCount);
17479 }
17480 
17481 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
17482 {
17483  return hPool->m_BlockVector.CheckCorruption();
17484 }
17485 
17486 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
17487 {
17488  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
17489 
17490  // Process default pools.
17491  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17492  {
17493  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
17494  {
17495  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17496  VMA_ASSERT(pBlockVector);
17497  VkResult localRes = pBlockVector->CheckCorruption();
17498  switch(localRes)
17499  {
17500  case VK_ERROR_FEATURE_NOT_PRESENT:
17501  break;
17502  case VK_SUCCESS:
17503  finalRes = VK_SUCCESS;
17504  break;
17505  default:
17506  return localRes;
17507  }
17508  }
17509  }
17510 
17511  // Process custom pools.
17512  {
17513  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17514  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
17515  {
17516  if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
17517  {
17518  VkResult localRes = pool->m_BlockVector.CheckCorruption();
17519  switch(localRes)
17520  {
17521  case VK_ERROR_FEATURE_NOT_PRESENT:
17522  break;
17523  case VK_SUCCESS:
17524  finalRes = VK_SUCCESS;
17525  break;
17526  default:
17527  return localRes;
17528  }
17529  }
17530  }
17531  }
17532 
17533  return finalRes;
17534 }
17535 
17536 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
17537 {
17538  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
17539  (*pAllocation)->InitLost();
17540 }
17541 
17542 // An object that increments the given atomic but decrements it back in the destructor, unless Commit() is called.
17543 template<typename T>
17544 struct AtomicTransactionalIncrement
17545 {
17546 public:
17547  typedef std::atomic<T> AtomicT;
17548  ~AtomicTransactionalIncrement()
17549  {
17550  if(m_Atomic)
17551  --(*m_Atomic);
17552  }
17553  T Increment(AtomicT* atomic)
17554  {
17555  m_Atomic = atomic;
17556  return m_Atomic->fetch_add(1);
17557  }
17558  void Commit()
17559  {
17560  m_Atomic = nullptr;
17561  }
17562 
17563 private:
17564  AtomicT* m_Atomic = nullptr;
17565 };
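// [Editorial note - not part of the original source.] Usage pattern of the
// guard above, as exercised by AllocateVulkanMemory() below: increment first,
// and only Commit() once the guarded operation has succeeded.
//
//     std::atomic<uint32_t> counter{0};
//     {
//         AtomicTransactionalIncrement<uint32_t> inc;
//         inc.Increment(&counter);   // counter == 1
//         if(DoGuardedWork())        // hypothetical operation
//             inc.Commit();          // keep the increment
//     }   // without Commit(), the destructor rolls counter back to 0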
17566 
17567 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
17568 {
17569  AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
17570  const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
17571 #if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
17572  if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
17573  {
17574  return VK_ERROR_TOO_MANY_OBJECTS;
17575  }
17576 #endif
17577 
17578  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
17579 
17580  // HeapSizeLimit is in effect for this heap.
17581  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
17582  {
17583  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
17584  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
17585  for(;;)
17586  {
17587  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
17588  if(blockBytesAfterAllocation > heapSize)
17589  {
17590  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17591  }
17592  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
17593  {
17594  break;
17595  }
17596  }
17597  }
17598  else
17599  {
17600  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
17601  }
17602 
17603  // VULKAN CALL vkAllocateMemory.
17604  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
17605 
17606  if(res == VK_SUCCESS)
17607  {
17608 #if VMA_MEMORY_BUDGET
17609  ++m_Budget.m_OperationsSinceBudgetFetch;
17610 #endif
17611 
17612  // Informative callback.
17613  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
17614  {
17615  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
17616  }
17617 
17618  deviceMemoryCountIncrement.Commit();
17619  }
17620  else
17621  {
17622  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
17623  }
17624 
17625  return res;
17626 }
17627 
17628 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
17629 {
17630  // Informative callback.
17631  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
17632  {
17633  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
17634  }
17635 
17636  // VULKAN CALL vkFreeMemory.
17637  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
17638 
17639  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
17640 
17641  --m_DeviceMemoryCount;
17642 }
17643 
17644 VkResult VmaAllocator_T::BindVulkanBuffer(
17645  VkDeviceMemory memory,
17646  VkDeviceSize memoryOffset,
17647  VkBuffer buffer,
17648  const void* pNext)
17649 {
17650  if(pNext != VMA_NULL)
17651  {
17652 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17653  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17654  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
17655  {
17656  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
17657  bindBufferMemoryInfo.pNext = pNext;
17658  bindBufferMemoryInfo.buffer = buffer;
17659  bindBufferMemoryInfo.memory = memory;
17660  bindBufferMemoryInfo.memoryOffset = memoryOffset;
17661  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
17662  }
17663  else
17664 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17665  {
17666  return VK_ERROR_EXTENSION_NOT_PRESENT;
17667  }
17668  }
17669  else
17670  {
17671  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
17672  }
17673 }
17674 
17675 VkResult VmaAllocator_T::BindVulkanImage(
17676  VkDeviceMemory memory,
17677  VkDeviceSize memoryOffset,
17678  VkImage image,
17679  const void* pNext)
17680 {
17681  if(pNext != VMA_NULL)
17682  {
17683 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17684  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17685  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
17686  {
17687  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
17688  bindImageMemoryInfo.pNext = pNext;
17689  bindImageMemoryInfo.image = image;
17690  bindImageMemoryInfo.memory = memory;
17691  bindImageMemoryInfo.memoryOffset = memoryOffset;
17692  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
17693  }
17694  else
17695 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17696  {
17697  return VK_ERROR_EXTENSION_NOT_PRESENT;
17698  }
17699  }
17700  else
17701  {
17702  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17703  }
17704 }
17705 
17706 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17707 {
17708  if(hAllocation->CanBecomeLost())
17709  {
17710  return VK_ERROR_MEMORY_MAP_FAILED;
17711  }
17712 
17713  switch(hAllocation->GetType())
17714  {
17715  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17716  {
17717  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17718  char *pBytes = VMA_NULL;
17719  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17720  if(res == VK_SUCCESS)
17721  {
17722  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17723  hAllocation->BlockAllocMap();
17724  }
17725  return res;
17726  }
17727  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17728  return hAllocation->DedicatedAllocMap(this, ppData);
17729  default:
17730  VMA_ASSERT(0);
17731  return VK_ERROR_MEMORY_MAP_FAILED;
17732  }
17733 }
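// ---------------------------------------------------------------------------
// [Editorial example - not part of the original source.] A sketch of the
// public mapping API that routes into Map()/Unmap() above; assumes a
// host-visible allocation. The helper name is hypothetical; memcpy is already
// used elsewhere in this implementation.
static VkResult ExampleWriteBytes(VmaAllocator allocator, VmaAllocation allocation,
    const void* pSrc, size_t byteCount)
{
    void* pData = VMA_NULL;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if(res == VK_SUCCESS)
    {
        memcpy(pData, pSrc, byteCount);
        vmaUnmapMemory(allocator, allocation);
    }
    return res;
}
// ---------------------------------------------------------------------------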
17734 
17735 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17736 {
17737  switch(hAllocation->GetType())
17738  {
17739  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17740  {
17741  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17742  hAllocation->BlockAllocUnmap();
17743  pBlock->Unmap(this, 1);
17744  }
17745  break;
17746  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17747  hAllocation->DedicatedAllocUnmap(this);
17748  break;
17749  default:
17750  VMA_ASSERT(0);
17751  }
17752 }
17753 
17754 VkResult VmaAllocator_T::BindBufferMemory(
17755  VmaAllocation hAllocation,
17756  VkDeviceSize allocationLocalOffset,
17757  VkBuffer hBuffer,
17758  const void* pNext)
17759 {
17760  VkResult res = VK_SUCCESS;
17761  switch(hAllocation->GetType())
17762  {
17763  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17764  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17765  break;
17766  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17767  {
17768  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17769  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17770  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17771  break;
17772  }
17773  default:
17774  VMA_ASSERT(0);
17775  }
17776  return res;
17777 }
17778 
17779 VkResult VmaAllocator_T::BindImageMemory(
17780  VmaAllocation hAllocation,
17781  VkDeviceSize allocationLocalOffset,
17782  VkImage hImage,
17783  const void* pNext)
17784 {
17785  VkResult res = VK_SUCCESS;
17786  switch(hAllocation->GetType())
17787  {
17788  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17789  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17790  break;
17791  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17792  {
17793  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17794  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17795  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17796  break;
17797  }
17798  default:
17799  VMA_ASSERT(0);
17800  }
17801  return res;
17802 }
17803 
17804 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17805  VmaAllocation hAllocation,
17806  VkDeviceSize offset, VkDeviceSize size,
17807  VMA_CACHE_OPERATION op)
17808 {
17809  VkResult res = VK_SUCCESS;
17810 
17811  VkMappedMemoryRange memRange = {};
17812  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17813  {
17814  switch(op)
17815  {
17816  case VMA_CACHE_FLUSH:
17817  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17818  break;
17819  case VMA_CACHE_INVALIDATE:
17820  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17821  break;
17822  default:
17823  VMA_ASSERT(0);
17824  }
17825  }
17826  // else: Just ignore this call.
17827  return res;
17828 }
17829 
17830 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17831  uint32_t allocationCount,
17832  const VmaAllocation* allocations,
17833  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17834  VMA_CACHE_OPERATION op)
17835 {
17836  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17837  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17838  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17839 
17840  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17841  {
17842  const VmaAllocation alloc = allocations[allocIndex];
17843  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17844  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17845  VkMappedMemoryRange newRange;
17846  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17847  {
17848  ranges.push_back(newRange);
17849  }
17850  }
17851 
17852  VkResult res = VK_SUCCESS;
17853  if(!ranges.empty())
17854  {
17855  switch(op)
17856  {
17857  case VMA_CACHE_FLUSH:
17858  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17859  break;
17860  case VMA_CACHE_INVALIDATE:
17861  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17862  break;
17863  default:
17864  VMA_ASSERT(0);
17865  }
17866  }
17867  // else: Just ignore this call.
17868  return res;
17869 }
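// ---------------------------------------------------------------------------
// [Editorial example - not part of the original source.] A sketch of flushing
// a CPU-written range on possibly non-coherent memory through the public
// vmaFlushAllocation(), which ends up in FlushOrInvalidateAllocation() above.
// On HOST_COHERENT memory types the call is a no-op by design.
static void ExampleFlushAfterWrite(VmaAllocator allocator, VmaAllocation allocation,
    VkDeviceSize offset, VkDeviceSize size)
{
    // offset/size are relative to the allocation; VK_WHOLE_SIZE flushes
    // everything from offset to the end of the allocation.
    vmaFlushAllocation(allocator, allocation, offset, size);
}
// ---------------------------------------------------------------------------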
17870 
17871 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17872 {
17873  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17874 
17875  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17876  {
17877  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17878  DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
17879  dedicatedAllocations.Remove(allocation);
17880  }
17881 
17882  VkDeviceMemory hMemory = allocation->GetMemory();
17883 
17884  /*
17885  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
17886  before vkFreeMemory.
17887 
17888  if(allocation->GetMappedData() != VMA_NULL)
17889  {
17890  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17891  }
17892  */
17893 
17894  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17895 
17896  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17897 }
17898 
17899 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17900 {
17901  VkBufferCreateInfo dummyBufCreateInfo;
17902  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17903 
17904  uint32_t memoryTypeBits = 0;
17905 
17906  // Create buffer.
17907  VkBuffer buf = VK_NULL_HANDLE;
17908  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17909  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17910  if(res == VK_SUCCESS)
17911  {
17912  // Query for supported memory types.
17913  VkMemoryRequirements memReq;
17914  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17915  memoryTypeBits = memReq.memoryTypeBits;
17916 
17917  // Destroy buffer.
17918  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17919  }
17920 
17921  return memoryTypeBits;
17922 }
17923 
17924 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17925 {
17926  // Make sure memory information is already fetched.
17927  VMA_ASSERT(GetMemoryTypeCount() > 0);
17928 
17929  uint32_t memoryTypeBits = UINT32_MAX;
17930 
17931  if(!m_UseAmdDeviceCoherentMemory)
17932  {
17933  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17934  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17935  {
17936  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17937  {
17938  memoryTypeBits &= ~(1u << memTypeIndex);
17939  }
17940  }
17941  }
17942 
17943  return memoryTypeBits;
17944 }
17945 
17946 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17947  VmaAllocation allocation,
17948  VkDeviceSize offset, VkDeviceSize size,
17949  VkMappedMemoryRange& outRange) const
17950 {
17951  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17952  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17953  {
17954  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17955  const VkDeviceSize allocationSize = allocation->GetSize();
17956  VMA_ASSERT(offset <= allocationSize);
17957 
17958  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17959  outRange.pNext = VMA_NULL;
17960  outRange.memory = allocation->GetMemory();
17961 
17962  switch(allocation->GetType())
17963  {
17964  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17965  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17966  if(size == VK_WHOLE_SIZE)
17967  {
17968  outRange.size = allocationSize - outRange.offset;
17969  }
17970  else
17971  {
17972  VMA_ASSERT(offset + size <= allocationSize);
17973  outRange.size = VMA_MIN(
17974  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17975  allocationSize - outRange.offset);
17976  }
17977  break;
17978  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17979  {
17980  // 1. Still within this allocation.
17981  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17982  if(size == VK_WHOLE_SIZE)
17983  {
17984  size = allocationSize - offset;
17985  }
17986  else
17987  {
17988  VMA_ASSERT(offset + size <= allocationSize);
17989  }
17990  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17991 
17992  // 2. Adjust to whole block.
17993  const VkDeviceSize allocationOffset = allocation->GetOffset();
17994  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17995  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17996  outRange.offset += allocationOffset;
17997  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17998 
17999  break;
18000  }
18001  default:
18002  VMA_ASSERT(0);
18003  }
18004  return true;
18005  }
18006  return false;
18007 }
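// [Editorial note - not part of the original source.] A worked example of the
// alignment math above: with nonCoherentAtomSize = 64, offset = 100 and
// size = 8, outRange.offset = VmaAlignDown(100, 64) = 64 and
// outRange.size = VmaAlignUp(8 + (100 - 64), 64) = 64. The flushed window is
// widened to the enclosing 64-byte atoms, as the spec requires, and then
// clamped so it never extends past the allocation (or, for block
// suballocations, past the whole block).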
18008 
18009 #if VMA_MEMORY_BUDGET
18010 
18011 void VmaAllocator_T::UpdateVulkanBudget()
18012 {
18013  VMA_ASSERT(m_UseExtMemoryBudget);
18014 
18015  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
18016 
18017  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
18018  VmaPnextChainPushFront(&memProps, &budgetProps);
18019 
18020  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
18021 
18022  {
18023  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
18024 
18025  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
18026  {
18027  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
18028  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
18029  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
18030 
18031  // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
18032  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
18033  {
18034  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
18035  }
18036  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
18037  {
18038  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
18039  }
18040  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
18041  {
18042  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
18043  }
18044  }
18045  m_Budget.m_OperationsSinceBudgetFetch = 0;
18046  }
18047 }
18048 
18049 #endif // #if VMA_MEMORY_BUDGET
18050 
18051 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
18052 {
18053  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
18054  !hAllocation->CanBecomeLost() &&
18055  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
18056  {
18057  void* pData = VMA_NULL;
18058  VkResult res = Map(hAllocation, &pData);
18059  if(res == VK_SUCCESS)
18060  {
18061  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
18062  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
18063  Unmap(hAllocation);
18064  }
18065  else
18066  {
18067  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
18068  }
18069  }
18070 }
18071 
18072 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
18073 {
18074  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
18075  if(memoryTypeBits == UINT32_MAX)
18076  {
18077  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
18078  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
18079  }
18080  return memoryTypeBits;
18081 }
18082 
18083 #if VMA_STATS_STRING_ENABLED
18084 
18085 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
18086 {
18087  bool dedicatedAllocationsStarted = false;
18088  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
18089  {
18090  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
18091  DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
18092  if(!dedicatedAllocList.IsEmpty())
18093  {
18094  if(dedicatedAllocationsStarted == false)
18095  {
18096  dedicatedAllocationsStarted = true;
18097  json.WriteString("DedicatedAllocations");
18098  json.BeginObject();
18099  }
18100 
18101  json.BeginString("Type ");
18102  json.ContinueString(memTypeIndex);
18103  json.EndString();
18104 
18105  json.BeginArray();
18106 
18107  for(VmaAllocation alloc = dedicatedAllocList.Front();
18108  alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
18109  {
18110  json.BeginObject(true);
18111  alloc->PrintParameters(json);
18112  json.EndObject();
18113  }
18114 
18115  json.EndArray();
18116  }
18117  }
18118  if(dedicatedAllocationsStarted)
18119  {
18120  json.EndObject();
18121  }
18122 
18123  {
18124  bool allocationsStarted = false;
18125  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
18126  {
18127  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
18128  {
18129  if(allocationsStarted == false)
18130  {
18131  allocationsStarted = true;
18132  json.WriteString("DefaultPools");
18133  json.BeginObject();
18134  }
18135 
18136  json.BeginString("Type ");
18137  json.ContinueString(memTypeIndex);
18138  json.EndString();
18139 
18140  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
18141  }
18142  }
18143  if(allocationsStarted)
18144  {
18145  json.EndObject();
18146  }
18147  }
18148 
18149  // Custom pools
18150  {
18151  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
18152  if(!m_Pools.IsEmpty())
18153  {
18154  json.WriteString("Pools");
18155  json.BeginObject();
18156  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
18157  {
18158  json.BeginString();
18159  json.ContinueString(pool->GetId());
18160  json.EndString();
18161 
18162  pool->m_BlockVector.PrintDetailedMap(json);
18163  }
18164  json.EndObject();
18165  }
18166  }
18167 }
18168 
18169 #endif // #if VMA_STATS_STRING_ENABLED
18170 
18171 //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
18172 // Public interface
18173 
18174 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
18175  const VmaAllocatorCreateInfo* pCreateInfo,
18176  VmaAllocator* pAllocator)
18177 {
18178  VMA_ASSERT(pCreateInfo && pAllocator);
18179  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
18180  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
18181  VMA_DEBUG_LOG("vmaCreateAllocator");
18182  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
18183  return (*pAllocator)->Init(pCreateInfo);
18184 }
18185 
18186 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
18187  VmaAllocator allocator)
18188 {
18189  if(allocator != VK_NULL_HANDLE)
18190  {
18191  VMA_DEBUG_LOG("vmaDestroyAllocator");
18192  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
18193  vma_delete(&allocationCallbacks, allocator);
18194  }
18195 }
18196 
18197 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
18198 {
18199  VMA_ASSERT(allocator && pAllocatorInfo);
18200  pAllocatorInfo->instance = allocator->m_hInstance;
18201  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
18202  pAllocatorInfo->device = allocator->m_hDevice;
18203 }
18204 
18205 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
18206  VmaAllocator allocator,
18207  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
18208 {
18209  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
18210  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
18211 }
18212 
18213 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
18214  VmaAllocator allocator,
18215  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
18216 {
18217  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
18218  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
18219 }
18220 
18221 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
18222  VmaAllocator allocator,
18223  uint32_t memoryTypeIndex,
18224  VkMemoryPropertyFlags* pFlags)
18225 {
18226  VMA_ASSERT(allocator && pFlags);
18227  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
18228  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
18229 }
18230 
18231 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
18232  VmaAllocator allocator,
18233  uint32_t frameIndex)
18234 {
18235  VMA_ASSERT(allocator);
18236  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
18237 
18238  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18239 
18240  allocator->SetCurrentFrameIndex(frameIndex);
18241 }
18242 
18243 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
18244  VmaAllocator allocator,
18245  VmaStats* pStats)
18246 {
18247  VMA_ASSERT(allocator && pStats);
18248  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18249  allocator->CalculateStats(pStats);
18250 }
18251 
18252 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
18253  VmaAllocator allocator,
18254  VmaBudget* pBudget)
18255 {
18256  VMA_ASSERT(allocator && pBudget);
18257  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18258  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
18259 }
18260 
18261 #if VMA_STATS_STRING_ENABLED
18262 
18263 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
18264  VmaAllocator allocator,
18265  char** ppStatsString,
18266  VkBool32 detailedMap)
18267 {
18268  VMA_ASSERT(allocator && ppStatsString);
18269  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18270 
18271  VmaStringBuilder sb(allocator);
18272  {
18273  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
18274  json.BeginObject();
18275 
18276  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
18277  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
18278 
18279  VmaStats stats;
18280  allocator->CalculateStats(&stats);
18281 
18282  json.WriteString("Total");
18283  VmaPrintStatInfo(json, stats.total);
18284 
18285  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
18286  {
18287  json.BeginString("Heap ");
18288  json.ContinueString(heapIndex);
18289  json.EndString();
18290  json.BeginObject();
18291 
18292  json.WriteString("Size");
18293  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
18294 
18295  json.WriteString("Flags");
18296  json.BeginArray(true);
18297  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
18298  {
18299  json.WriteString("DEVICE_LOCAL");
18300  }
18301  json.EndArray();
18302 
18303  json.WriteString("Budget");
18304  json.BeginObject();
18305  {
18306  json.WriteString("BlockBytes");
18307  json.WriteNumber(budget[heapIndex].blockBytes);
18308  json.WriteString("AllocationBytes");
18309  json.WriteNumber(budget[heapIndex].allocationBytes);
18310  json.WriteString("Usage");
18311  json.WriteNumber(budget[heapIndex].usage);
18312  json.WriteString("Budget");
18313  json.WriteNumber(budget[heapIndex].budget);
18314  }
18315  json.EndObject();
18316 
18317  if(stats.memoryHeap[heapIndex].blockCount > 0)
18318  {
18319  json.WriteString("Stats");
18320  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
18321  }
18322 
18323  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
18324  {
18325  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
18326  {
18327  json.BeginString("Type ");
18328  json.ContinueString(typeIndex);
18329  json.EndString();
18330 
18331  json.BeginObject();
18332 
18333  json.WriteString("Flags");
18334  json.BeginArray(true);
18335  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
18336  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
18337  {
18338  json.WriteString("DEVICE_LOCAL");
18339  }
18340  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
18341  {
18342  json.WriteString("HOST_VISIBLE");
18343  }
18344  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
18345  {
18346  json.WriteString("HOST_COHERENT");
18347  }
18348  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
18349  {
18350  json.WriteString("HOST_CACHED");
18351  }
18352  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
18353  {
18354  json.WriteString("LAZILY_ALLOCATED");
18355  }
18356 #if VMA_VULKAN_VERSION >= 1001000
18357  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
18358  {
18359  json.WriteString("PROTECTED");
18360  }
18361 #endif // #if VMA_VULKAN_VERSION >= 1001000
18362 #if VK_AMD_device_coherent_memory
18363  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
18364  {
18365  json.WriteString("DEVICE_COHERENT");
18366  }
18367  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
18368  {
18369  json.WriteString("DEVICE_UNCACHED");
18370  }
18371 #endif // #if VK_AMD_device_coherent_memory
18372  json.EndArray();
18373 
18374  if(stats.memoryType[typeIndex].blockCount > 0)
18375  {
18376  json.WriteString("Stats");
18377  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
18378  }
18379 
18380  json.EndObject();
18381  }
18382  }
18383 
18384  json.EndObject();
18385  }
18386  if(detailedMap == VK_TRUE)
18387  {
18388  allocator->PrintDetailedMap(json);
18389  }
18390 
18391  json.EndObject();
18392  }
18393 
18394  const size_t len = sb.GetLength();
18395  char* const pChars = vma_new_array(allocator, char, len + 1);
18396  if(len > 0)
18397  {
18398  memcpy(pChars, sb.GetData(), len);
18399  }
18400  pChars[len] = '\0';
18401  *ppStatsString = pChars;
18402 }
18403 
18404 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
18405  VmaAllocator allocator,
18406  char* pStatsString)
18407 {
18408  if(pStatsString != VMA_NULL)
18409  {
18410  VMA_ASSERT(allocator);
18411  size_t len = strlen(pStatsString);
18412  vma_delete_array(allocator, pStatsString, len + 1);
18413  }
18414 }
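// ---------------------------------------------------------------------------
// [Editorial example - not part of the original source.] The two functions
// above are meant to be used as a pair; a minimal sketch (assumes <cstdio>):
static void ExampleDumpStats(VmaAllocator allocator)
{
    char* pStatsString = VMA_NULL;
    vmaBuildStatsString(allocator, &pStatsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", pStatsString);
    vmaFreeStatsString(allocator, pStatsString); // must be freed through the same allocator
}
// ---------------------------------------------------------------------------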
18415 
18416 #endif // #if VMA_STATS_STRING_ENABLED
18417 
18418 /*
18419 This function is not protected by any mutex because it just reads immutable data.
18420 */
18421 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
18422  VmaAllocator allocator,
18423  uint32_t memoryTypeBits,
18424  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18425  uint32_t* pMemoryTypeIndex)
18426 {
18427  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18428  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18429  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18430 
18431  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
18432 
18433  if(pAllocationCreateInfo->memoryTypeBits != 0)
18434  {
18435  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
18436  }
18437 
18438  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
18439  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
18440  uint32_t notPreferredFlags = 0;
18441 
18442  // Convert usage to requiredFlags and preferredFlags.
18443  switch(pAllocationCreateInfo->usage)
18444  {
18445  case VMA_MEMORY_USAGE_UNKNOWN:
18446  break;
18447  case VMA_MEMORY_USAGE_GPU_ONLY:
18448  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18449  {
18450  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18451  }
18452  break;
18453  case VMA_MEMORY_USAGE_CPU_ONLY:
18454  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
18455  break;
18456  case VMA_MEMORY_USAGE_CPU_TO_GPU:
18457  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18458  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18459  {
18460  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18461  }
18462  break;
18463  case VMA_MEMORY_USAGE_GPU_TO_CPU:
18464  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18465  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
18466  break;
18467  case VMA_MEMORY_USAGE_CPU_COPY:
18468  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18469  break;
18470  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
18471  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
18472  break;
18473  default:
18474  VMA_ASSERT(0);
18475  break;
18476  }
18477 
18478  // Avoid DEVICE_COHERENT unless explicitly requested.
18479  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
18480  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
18481  {
18482  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
18483  }
18484 
18485  *pMemoryTypeIndex = UINT32_MAX;
18486  uint32_t minCost = UINT32_MAX;
18487  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
18488  memTypeIndex < allocator->GetMemoryTypeCount();
18489  ++memTypeIndex, memTypeBit <<= 1)
18490  {
18491  // This memory type is acceptable according to memoryTypeBits bitmask.
18492  if((memTypeBit & memoryTypeBits) != 0)
18493  {
18494  const VkMemoryPropertyFlags currFlags =
18495  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
18496  // This memory type contains requiredFlags.
18497  if((requiredFlags & ~currFlags) == 0)
18498  {
18499  // Calculate cost as number of bits from preferredFlags not present in this memory type.
18500  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
18501  VmaCountBitsSet(currFlags & notPreferredFlags);
18502  // Remember memory type with lowest cost.
18503  if(currCost < minCost)
18504  {
18505  *pMemoryTypeIndex = memTypeIndex;
18506  if(currCost == 0)
18507  {
18508  return VK_SUCCESS;
18509  }
18510  minCost = currCost;
18511  }
18512  }
18513  }
18514  }
18515  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
18516 }
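// ---------------------------------------------------------------------------
// [Editorial example - not part of the original source.] A sketch of using
// the cost-based search above to pick a memory type for a staging buffer; the
// memoryTypeBits value would normally come from vkGetBufferMemoryRequirements
// and the helper name is hypothetical.
static VkResult ExampleFindStagingMemoryType(VmaAllocator allocator,
    uint32_t memoryTypeBits, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // maps to HOST_VISIBLE | HOST_COHERENT above
    return vmaFindMemoryTypeIndex(allocator, memoryTypeBits, &allocCreateInfo, pMemTypeIndex);
}
// ---------------------------------------------------------------------------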
18517 
18518 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
18519  VmaAllocator allocator,
18520  const VkBufferCreateInfo* pBufferCreateInfo,
18521  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18522  uint32_t* pMemoryTypeIndex)
18523 {
18524  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18525  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
18526  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18527  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18528 
18529  const VkDevice hDev = allocator->m_hDevice;
18530  VkBuffer hBuffer = VK_NULL_HANDLE;
18531  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
18532  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
18533  if(res == VK_SUCCESS)
18534  {
18535  VkMemoryRequirements memReq = {};
18536  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
18537  hDev, hBuffer, &memReq);
18538 
18539  res = vmaFindMemoryTypeIndex(
18540  allocator,
18541  memReq.memoryTypeBits,
18542  pAllocationCreateInfo,
18543  pMemoryTypeIndex);
18544 
18545  allocator->GetVulkanFunctions().vkDestroyBuffer(
18546  hDev, hBuffer, allocator->GetAllocationCallbacks());
18547  }
18548  return res;
18549 }
18550 
18551 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
18552  VmaAllocator allocator,
18553  const VkImageCreateInfo* pImageCreateInfo,
18554  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18555  uint32_t* pMemoryTypeIndex)
18556 {
18557  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18558  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
18559  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18560  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18561 
18562  const VkDevice hDev = allocator->m_hDevice;
18563  VkImage hImage = VK_NULL_HANDLE;
18564  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
18565  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
18566  if(res == VK_SUCCESS)
18567  {
18568  VkMemoryRequirements memReq = {};
18569  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
18570  hDev, hImage, &memReq);
18571 
18572  res = vmaFindMemoryTypeIndex(
18573  allocator,
18574  memReq.memoryTypeBits,
18575  pAllocationCreateInfo,
18576  pMemoryTypeIndex);
18577 
18578  allocator->GetVulkanFunctions().vkDestroyImage(
18579  hDev, hImage, allocator->GetAllocationCallbacks());
18580  }
18581  return res;
18582 }
18583 
18584 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
18585  VmaAllocator allocator,
18586  const VmaPoolCreateInfo* pCreateInfo,
18587  VmaPool* pPool)
18588 {
18589  VMA_ASSERT(allocator && pCreateInfo && pPool);
18590 
18591  VMA_DEBUG_LOG("vmaCreatePool");
18592 
18593  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18594 
18595  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
18596 
18597 #if VMA_RECORDING_ENABLED
18598  if(allocator->GetRecorder() != VMA_NULL)
18599  {
18600  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
18601  }
18602 #endif
18603 
18604  return res;
18605 }
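Usage sketch (assumed handle `allocator`, illustrative sizes): a custom pool is typically created for a memory type found with one of the vmaFindMemoryTypeIndex* helpers above, then referenced through VmaAllocationCreateInfo::pool.

// Pick a memory type representative of the resources the pool will hold.
VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
sampleBufCreateInfo.size = 1024; // any representative size
sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo sampleAllocCreateInfo = {};
sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
    allocator, &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);

VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 16 MiB per VkDeviceMemory block
poolCreateInfo.minBlockCount = 1;

VmaPool pool;
res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Free all allocations made from the pool, then vmaDestroyPool(allocator, pool).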
18606 
18607 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
18608  VmaAllocator allocator,
18609  VmaPool pool)
18610 {
18611  VMA_ASSERT(allocator);
18612 
18613  if(pool == VK_NULL_HANDLE)
18614  {
18615  return;
18616  }
18617 
18618  VMA_DEBUG_LOG("vmaDestroyPool");
18619 
18620  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18621 
18622 #if VMA_RECORDING_ENABLED
18623  if(allocator->GetRecorder() != VMA_NULL)
18624  {
18625  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
18626  }
18627 #endif
18628 
18629  allocator->DestroyPool(pool);
18630 }
18631 
18632 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
18633  VmaAllocator allocator,
18634  VmaPool pool,
18635  VmaPoolStats* pPoolStats)
18636 {
18637  VMA_ASSERT(allocator && pool && pPoolStats);
18638 
18639  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18640 
18641  allocator->GetPoolStats(pool, pPoolStats);
18642 }
18643 
18644 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
18645  VmaAllocator allocator,
18646  VmaPool pool,
18647  size_t* pLostAllocationCount)
18648 {
18649  VMA_ASSERT(allocator && pool);
18650 
18651  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18652 
18653 #if VMA_RECORDING_ENABLED
18654  if(allocator->GetRecorder() != VMA_NULL)
18655  {
18656  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
18657  }
18658 #endif
18659 
18660  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18661 }
18662 
18663 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
18664 {
18665  VMA_ASSERT(allocator && pool);
18666 
18667  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18668 
18669  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
18670 
18671  return allocator->CheckPoolCorruption(pool);
18672 }
18673 
18674 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
18675  VmaAllocator allocator,
18676  VmaPool pool,
18677  const char** ppName)
18678 {
18679  VMA_ASSERT(allocator && pool && ppName);
18680 
18681  VMA_DEBUG_LOG("vmaGetPoolName");
18682 
18683  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18684 
18685  *ppName = pool->GetName();
18686 }
18687 
18688 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
18689  VmaAllocator allocator,
18690  VmaPool pool,
18691  const char* pName)
18692 {
18693  VMA_ASSERT(allocator && pool);
18694 
18695  VMA_DEBUG_LOG("vmaSetPoolName");
18696 
18697  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18698 
18699  pool->SetName(pName);
18700 
18701 #if VMA_RECORDING_ENABLED
18702  if(allocator->GetRecorder() != VMA_NULL)
18703  {
18704  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18705  }
18706 #endif
18707 }
18708 
18709 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18710  VmaAllocator allocator,
18711  const VkMemoryRequirements* pVkMemoryRequirements,
18712  const VmaAllocationCreateInfo* pCreateInfo,
18713  VmaAllocation* pAllocation,
18714  VmaAllocationInfo* pAllocationInfo)
18715 {
18716  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18717 
18718  VMA_DEBUG_LOG("vmaAllocateMemory");
18719 
18720  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18721 
18722  VkResult result = allocator->AllocateMemory(
18723  *pVkMemoryRequirements,
18724  false, // requiresDedicatedAllocation
18725  false, // prefersDedicatedAllocation
18726  VK_NULL_HANDLE, // dedicatedBuffer
18727  UINT32_MAX, // dedicatedBufferUsage
18728  VK_NULL_HANDLE, // dedicatedImage
18729  *pCreateInfo,
18730  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18731  1, // allocationCount
18732  pAllocation);
18733 
18734 #if VMA_RECORDING_ENABLED
18735  if(allocator->GetRecorder() != VMA_NULL)
18736  {
18737  allocator->GetRecorder()->RecordAllocateMemory(
18738  allocator->GetCurrentFrameIndex(),
18739  *pVkMemoryRequirements,
18740  *pCreateInfo,
18741  *pAllocation);
18742  }
18743 #endif
18744 
18745  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18746  {
18747  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18748  }
18749 
18750  return result;
18751 }
18752 
18753 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18754  VmaAllocator allocator,
18755  const VkMemoryRequirements* pVkMemoryRequirements,
18756  const VmaAllocationCreateInfo* pCreateInfo,
18757  size_t allocationCount,
18758  VmaAllocation* pAllocations,
18759  VmaAllocationInfo* pAllocationInfo)
18760 {
18761  if(allocationCount == 0)
18762  {
18763  return VK_SUCCESS;
18764  }
18765 
18766  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18767 
18768  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18769 
18770  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18771 
18772  VkResult result = allocator->AllocateMemory(
18773  *pVkMemoryRequirements,
18774  false, // requiresDedicatedAllocation
18775  false, // prefersDedicatedAllocation
18776  VK_NULL_HANDLE, // dedicatedBuffer
18777  UINT32_MAX, // dedicatedBufferUsage
18778  VK_NULL_HANDLE, // dedicatedImage
18779  *pCreateInfo,
18780  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18781  allocationCount,
18782  pAllocations);
18783 
18784 #if VMA_RECORDING_ENABLED
18785  if(allocator->GetRecorder() != VMA_NULL)
18786  {
18787  allocator->GetRecorder()->RecordAllocateMemoryPages(
18788  allocator->GetCurrentFrameIndex(),
18789  *pVkMemoryRequirements,
18790  *pCreateInfo,
18791  (uint64_t)allocationCount,
18792  pAllocations);
18793  }
18794 #endif
18795 
18796  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18797  {
18798  for(size_t i = 0; i < allocationCount; ++i)
18799  {
18800  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18801  }
18802  }
18803 
18804  return result;
18805 }
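Usage sketch (assumed handle `allocator`, illustrative requirements): each returned allocation is independent, so the matching free call is vmaFreeMemoryPages(). Note that when pAllocationInfo is not null the implementation above writes pAllocationInfo + i, so it must point to an array of allocationCount entries.

VkMemoryRequirements memReq = {};
memReq.size = 1024 * 1024;          // illustrative
memReq.alignment = 256;             // illustrative
memReq.memoryTypeBits = UINT32_MAX; // every memory type acceptable

VmaAllocationCreateInfo createInfo = {};
createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocs[8];
VmaAllocationInfo infos[8]; // one entry per allocation, or pass NULL
VkResult res = vmaAllocateMemoryPages(allocator, &memReq, &createInfo, 8, allocs, infos);
if(res == VK_SUCCESS)
{
    // ... use the allocations ...
    vmaFreeMemoryPages(allocator, 8, allocs);
}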
18806 
18807 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18808  VmaAllocator allocator,
18809  VkBuffer buffer,
18810  const VmaAllocationCreateInfo* pCreateInfo,
18811  VmaAllocation* pAllocation,
18812  VmaAllocationInfo* pAllocationInfo)
18813 {
18814  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18815 
18816  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18817 
18818  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18819 
18820  VkMemoryRequirements vkMemReq = {};
18821  bool requiresDedicatedAllocation = false;
18822  bool prefersDedicatedAllocation = false;
18823  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18824  requiresDedicatedAllocation,
18825  prefersDedicatedAllocation);
18826 
18827  VkResult result = allocator->AllocateMemory(
18828  vkMemReq,
18829  requiresDedicatedAllocation,
18830  prefersDedicatedAllocation,
18831  buffer, // dedicatedBuffer
18832  UINT32_MAX, // dedicatedBufferUsage
18833  VK_NULL_HANDLE, // dedicatedImage
18834  *pCreateInfo,
18835  VMA_SUBALLOCATION_TYPE_BUFFER,
18836  1, // allocationCount
18837  pAllocation);
18838 
18839 #if VMA_RECORDING_ENABLED
18840  if(allocator->GetRecorder() != VMA_NULL)
18841  {
18842  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18843  allocator->GetCurrentFrameIndex(),
18844  vkMemReq,
18845  requiresDedicatedAllocation,
18846  prefersDedicatedAllocation,
18847  *pCreateInfo,
18848  *pAllocation);
18849  }
18850 #endif
18851 
18852  if(pAllocationInfo && result == VK_SUCCESS)
18853  {
18854  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18855  }
18856 
18857  return result;
18858 }
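Usage sketch: unlike vmaCreateBuffer(), this function only allocates; the caller binds afterwards, e.g. with vmaBindBufferMemory() defined further below. `allocator` and `buffer` (from a prior vkCreateBuffer call) are assumed handles.

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, NULL);
if(res == VK_SUCCESS)
{
    res = vmaBindBufferMemory(allocator, allocation, buffer);
}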
18859 
18860 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18861  VmaAllocator allocator,
18862  VkImage image,
18863  const VmaAllocationCreateInfo* pCreateInfo,
18864  VmaAllocation* pAllocation,
18865  VmaAllocationInfo* pAllocationInfo)
18866 {
18867  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18868 
18869  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18870 
18871  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18872 
18873  VkMemoryRequirements vkMemReq = {};
18874  bool requiresDedicatedAllocation = false;
18875  bool prefersDedicatedAllocation = false;
18876  allocator->GetImageMemoryRequirements(image, vkMemReq,
18877  requiresDedicatedAllocation, prefersDedicatedAllocation);
18878 
18879  VkResult result = allocator->AllocateMemory(
18880  vkMemReq,
18881  requiresDedicatedAllocation,
18882  prefersDedicatedAllocation,
18883  VK_NULL_HANDLE, // dedicatedBuffer
18884  UINT32_MAX, // dedicatedBufferUsage
18885  image, // dedicatedImage
18886  *pCreateInfo,
18887  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18888  1, // allocationCount
18889  pAllocation);
18890 
18891 #if VMA_RECORDING_ENABLED
18892  if(allocator->GetRecorder() != VMA_NULL)
18893  {
18894  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18895  allocator->GetCurrentFrameIndex(),
18896  vkMemReq,
18897  requiresDedicatedAllocation,
18898  prefersDedicatedAllocation,
18899  *pCreateInfo,
18900  *pAllocation);
18901  }
18902 #endif
18903 
18904  if(pAllocationInfo && result == VK_SUCCESS)
18905  {
18906  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18907  }
18908 
18909  return result;
18910 }
18911 
18912 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18913  VmaAllocator allocator,
18914  VmaAllocation allocation)
18915 {
18916  VMA_ASSERT(allocator);
18917 
18918  if(allocation == VK_NULL_HANDLE)
18919  {
18920  return;
18921  }
18922 
18923  VMA_DEBUG_LOG("vmaFreeMemory");
18924 
18925  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18926 
18927 #if VMA_RECORDING_ENABLED
18928  if(allocator->GetRecorder() != VMA_NULL)
18929  {
18930  allocator->GetRecorder()->RecordFreeMemory(
18931  allocator->GetCurrentFrameIndex(),
18932  allocation);
18933  }
18934 #endif
18935 
18936  allocator->FreeMemory(
18937  1, // allocationCount
18938  &allocation);
18939 }
18940 
18941 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18942  VmaAllocator allocator,
18943  size_t allocationCount,
18944  const VmaAllocation* pAllocations)
18945 {
18946  if(allocationCount == 0)
18947  {
18948  return;
18949  }
18950 
18951  VMA_ASSERT(allocator);
18952 
18953  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18954 
18955  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18956 
18957 #if VMA_RECORDING_ENABLED
18958  if(allocator->GetRecorder() != VMA_NULL)
18959  {
18960  allocator->GetRecorder()->RecordFreeMemoryPages(
18961  allocator->GetCurrentFrameIndex(),
18962  (uint64_t)allocationCount,
18963  pAllocations);
18964  }
18965 #endif
18966 
18967  allocator->FreeMemory(allocationCount, pAllocations);
18968 }
18969 
18970 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18971  VmaAllocator allocator,
18972  VmaAllocation allocation,
18973  VmaAllocationInfo* pAllocationInfo)
18974 {
18975  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18976 
18977  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18978 
18979 #if VMA_RECORDING_ENABLED
18980  if(allocator->GetRecorder() != VMA_NULL)
18981  {
18982  allocator->GetRecorder()->RecordGetAllocationInfo(
18983  allocator->GetCurrentFrameIndex(),
18984  allocation);
18985  }
18986 #endif
18987 
18988  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18989 }
18990 
18991 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18992  VmaAllocator allocator,
18993  VmaAllocation allocation)
18994 {
18995  VMA_ASSERT(allocator && allocation);
18996 
18997  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18998 
18999 #if VMA_RECORDING_ENABLED
19000  if(allocator->GetRecorder() != VMA_NULL)
19001  {
19002  allocator->GetRecorder()->RecordTouchAllocation(
19003  allocator->GetCurrentFrameIndex(),
19004  allocation);
19005  }
19006 #endif
19007 
19008  return allocator->TouchAllocation(allocation);
19009 }
19010 
19011 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
19012  VmaAllocator allocator,
19013  VmaAllocation allocation,
19014  void* pUserData)
19015 {
19016  VMA_ASSERT(allocator && allocation);
19017 
19018  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19019 
19020  allocation->SetUserData(allocator, pUserData);
19021 
19022 #if VMA_RECORDING_ENABLED
19023  if(allocator->GetRecorder() != VMA_NULL)
19024  {
19025  allocator->GetRecorder()->RecordSetAllocationUserData(
19026  allocator->GetCurrentFrameIndex(),
19027  allocation,
19028  pUserData);
19029  }
19030 #endif
19031 }
19032 
19033 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
19034  VmaAllocator allocator,
19035  VmaAllocation* pAllocation)
19036 {
19037  VMA_ASSERT(allocator && pAllocation);
19038 
19039  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19040 
19041  allocator->CreateLostAllocation(pAllocation);
19042 
19043 #if VMA_RECORDING_ENABLED
19044  if(allocator->GetRecorder() != VMA_NULL)
19045  {
19046  allocator->GetRecorder()->RecordCreateLostAllocation(
19047  allocator->GetCurrentFrameIndex(),
19048  *pAllocation);
19049  }
19050 #endif
19051 }
19052 
19053 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
19054  VmaAllocator allocator,
19055  VmaAllocation allocation,
19056  void** ppData)
19057 {
19058  VMA_ASSERT(allocator && allocation && ppData);
19059 
19060  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19061 
19062  VkResult res = allocator->Map(allocation, ppData);
19063 
19064 #if VMA_RECORDING_ENABLED
19065  if(allocator->GetRecorder() != VMA_NULL)
19066  {
19067  allocator->GetRecorder()->RecordMapMemory(
19068  allocator->GetCurrentFrameIndex(),
19069  allocation);
19070  }
19071 #endif
19072 
19073  return res;
19074 }
19075 
19076 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
19077  VmaAllocator allocator,
19078  VmaAllocation allocation)
19079 {
19080  VMA_ASSERT(allocator && allocation);
19081 
19082  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19083 
19084 #if VMA_RECORDING_ENABLED
19085  if(allocator->GetRecorder() != VMA_NULL)
19086  {
19087  allocator->GetRecorder()->RecordUnmapMemory(
19088  allocator->GetCurrentFrameIndex(),
19089  allocation);
19090  }
19091 #endif
19092 
19093  allocator->Unmap(allocation);
19094 }
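Usage sketch of the map/copy/flush/unmap sequence. `allocator` and `allocation` (created in a HOST_VISIBLE memory type) plus `srcData`/`srcSize` are assumed; memcpy needs <string.h>. The flush matters only for memory types without HOST_COHERENT; for coherent types it is a valid no-op.

void* mapped = NULL;
VkResult res = vmaMapMemory(allocator, allocation, &mapped);
if(res == VK_SUCCESS)
{
    memcpy(mapped, srcData, srcSize);
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
    vmaUnmapMemory(allocator, allocation);
}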
19095 
19096 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
19097 {
19098  VMA_ASSERT(allocator && allocation);
19099 
19100  VMA_DEBUG_LOG("vmaFlushAllocation");
19101 
19102  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19103 
19104  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
19105 
19106 #if VMA_RECORDING_ENABLED
19107  if(allocator->GetRecorder() != VMA_NULL)
19108  {
19109  allocator->GetRecorder()->RecordFlushAllocation(
19110  allocator->GetCurrentFrameIndex(),
19111  allocation, offset, size);
19112  }
19113 #endif
19114 
19115  return res;
19116 }
19117 
19118 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
19119 {
19120  VMA_ASSERT(allocator && allocation);
19121 
19122  VMA_DEBUG_LOG("vmaInvalidateAllocation");
19123 
19124  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19125 
19126  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
19127 
19128 #if VMA_RECORDING_ENABLED
19129  if(allocator->GetRecorder() != VMA_NULL)
19130  {
19131  allocator->GetRecorder()->RecordInvalidateAllocation(
19132  allocator->GetCurrentFrameIndex(),
19133  allocation, offset, size);
19134  }
19135 #endif
19136 
19137  return res;
19138 }
19139 
19140 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
19141  VmaAllocator allocator,
19142  uint32_t allocationCount,
19143  const VmaAllocation* allocations,
19144  const VkDeviceSize* offsets,
19145  const VkDeviceSize* sizes)
19146 {
19147  VMA_ASSERT(allocator);
19148 
19149  if(allocationCount == 0)
19150  {
19151  return VK_SUCCESS;
19152  }
19153 
19154  VMA_ASSERT(allocations);
19155 
19156  VMA_DEBUG_LOG("vmaFlushAllocations");
19157 
19158  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19159 
19160  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
19161 
19162 #if VMA_RECORDING_ENABLED
19163  if(allocator->GetRecorder() != VMA_NULL)
19164  {
19165  //TODO
19166  }
19167 #endif
19168 
19169  return res;
19170 }
19171 
19172 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
19173  VmaAllocator allocator,
19174  uint32_t allocationCount,
19175  const VmaAllocation* allocations,
19176  const VkDeviceSize* offsets,
19177  const VkDeviceSize* sizes)
19178 {
19179  VMA_ASSERT(allocator);
19180 
19181  if(allocationCount == 0)
19182  {
19183  return VK_SUCCESS;
19184  }
19185 
19186  VMA_ASSERT(allocations);
19187 
19188  VMA_DEBUG_LOG("vmaInvalidateAllocations");
19189 
19190  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19191 
19192  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
19193 
19194 #if VMA_RECORDING_ENABLED
19195  if(allocator->GetRecorder() != VMA_NULL)
19196  {
19197  //TODO
19198  }
19199 #endif
19200 
19201  return res;
19202 }
19203 
19204 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
19205 {
19206  VMA_ASSERT(allocator);
19207 
19208  VMA_DEBUG_LOG("vmaCheckCorruption");
19209 
19210  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19211 
19212  return allocator->CheckCorruption(memoryTypeBits);
19213 }
19214 
19215 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
19216  VmaAllocator allocator,
19217  const VmaAllocation* pAllocations,
19218  size_t allocationCount,
19219  VkBool32* pAllocationsChanged,
19220  const VmaDefragmentationInfo *pDefragmentationInfo,
19221  VmaDefragmentationStats* pDefragmentationStats)
19222 {
19223  // Deprecated interface, reimplemented on top of the new one.
19224 
19225  VmaDefragmentationInfo2 info2 = {};
19226  info2.allocationCount = (uint32_t)allocationCount;
19227  info2.pAllocations = pAllocations;
19228  info2.pAllocationsChanged = pAllocationsChanged;
19229  if(pDefragmentationInfo != VMA_NULL)
19230  {
19231  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
19232  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
19233  }
19234  else
19235  {
19236  info2.maxCpuAllocationsToMove = UINT32_MAX;
19237  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
19238  }
19239  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
19240 
19241  VmaDefragmentationContext ctx;
19242  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
19243  if(res == VK_NOT_READY)
19244  {
19245  res = vmaDefragmentationEnd(allocator, ctx);
19246  }
19247  return res;
19248 }
19249 
19250 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
19251  VmaAllocator allocator,
19252  const VmaDefragmentationInfo2* pInfo,
19253  VmaDefragmentationStats* pStats,
19254  VmaDefragmentationContext *pContext)
19255 {
19256  VMA_ASSERT(allocator && pInfo && pContext);
19257 
19258  // Degenerate case: Nothing to defragment.
19259  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
19260  {
19261  return VK_SUCCESS;
19262  }
19263 
19264  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
19265  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
19266  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
19267  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
19268 
19269  VMA_DEBUG_LOG("vmaDefragmentationBegin");
19270 
19271  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19272 
19273  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
19274 
19275 #if VMA_RECORDING_ENABLED
19276  if(allocator->GetRecorder() != VMA_NULL)
19277  {
19278  allocator->GetRecorder()->RecordDefragmentationBegin(
19279  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
19280  }
19281 #endif
19282 
19283  return res;
19284 }
19285 
19286 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
19287  VmaAllocator allocator,
19288  VmaDefragmentationContext context)
19289 {
19290  VMA_ASSERT(allocator);
19291 
19292  VMA_DEBUG_LOG("vmaDefragmentationEnd");
19293 
19294  if(context != VK_NULL_HANDLE)
19295  {
19296  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19297 
19298 #if VMA_RECORDING_ENABLED
19299  if(allocator->GetRecorder() != VMA_NULL)
19300  {
19301  allocator->GetRecorder()->RecordDefragmentationEnd(
19302  allocator->GetCurrentFrameIndex(), context);
19303  }
19304 #endif
19305 
19306  return allocator->DefragmentationEnd(context);
19307  }
19308  else
19309  {
19310  return VK_SUCCESS;
19311  }
19312 }
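Usage sketch of the non-deprecated path, mirroring the wrapper in vmaDefragment() above. CPU-side only, so commandBuffer stays null; `allocator`, `allocs`, and `allocCount` are assumed caller-provided.

VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx;
VmaDefragmentationStats defragStats = {};
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &defragStats, &defragCtx);
if(res == VK_NOT_READY)
{
    res = vmaDefragmentationEnd(allocator, defragCtx); // finishes the moves
}
// Buffers or images bound to moved allocations must be re-created and re-bound by the caller.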
19313 
19314 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
19315  VmaAllocator allocator,
19316  VmaDefragmentationContext context,
19317  VmaDefragmentationPassInfo* pInfo
19318  )
19319 {
19320  VMA_ASSERT(allocator);
19321  VMA_ASSERT(pInfo);
19322 
19323  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
19324 
19325  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19326 
19327  if(context == VK_NULL_HANDLE)
19328  {
19329  pInfo->moveCount = 0;
19330  return VK_SUCCESS;
19331  }
19332 
19333  return allocator->DefragmentationPassBegin(pInfo, context);
19334 }
19335 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
19336  VmaAllocator allocator,
19337  VmaDefragmentationContext context)
19338 {
19339  VMA_ASSERT(allocator);
19340 
19341  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
19342  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19343 
19344  if(context == VK_NULL_HANDLE)
19345  return VK_SUCCESS;
19346 
19347  return allocator->DefragmentationPassEnd(context);
19348 }
19349 
19350 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
19351  VmaAllocator allocator,
19352  VmaAllocation allocation,
19353  VkBuffer buffer)
19354 {
19355  VMA_ASSERT(allocator && allocation && buffer);
19356 
19357  VMA_DEBUG_LOG("vmaBindBufferMemory");
19358 
19359  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19360 
19361  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
19362 }
19363 
19364 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
19365  VmaAllocator allocator,
19366  VmaAllocation allocation,
19367  VkDeviceSize allocationLocalOffset,
19368  VkBuffer buffer,
19369  const void* pNext)
19370 {
19371  VMA_ASSERT(allocator && allocation && buffer);
19372 
19373  VMA_DEBUG_LOG("vmaBindBufferMemory2");
19374 
19375  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19376 
19377  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
19378 }
19379 
19380 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
19381  VmaAllocator allocator,
19382  VmaAllocation allocation,
19383  VkImage image)
19384 {
19385  VMA_ASSERT(allocator && allocation && image);
19386 
19387  VMA_DEBUG_LOG("vmaBindImageMemory");
19388 
19389  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19390 
19391  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
19392 }
19393 
19394 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
19395  VmaAllocator allocator,
19396  VmaAllocation allocation,
19397  VkDeviceSize allocationLocalOffset,
19398  VkImage image,
19399  const void* pNext)
19400 {
19401  VMA_ASSERT(allocator && allocation && image);
19402 
19403  VMA_DEBUG_LOG("vmaBindImageMemory2");
19404 
19405  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19406 
19407  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
19408 }
19409 
19410 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
19411  VmaAllocator allocator,
19412  const VkBufferCreateInfo* pBufferCreateInfo,
19413  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19414  VkBuffer* pBuffer,
19415  VmaAllocation* pAllocation,
19416  VmaAllocationInfo* pAllocationInfo)
19417 {
19418  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
19419 
19420  if(pBufferCreateInfo->size == 0)
19421  {
19422  return VK_ERROR_VALIDATION_FAILED_EXT;
19423  }
19424  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
19425  !allocator->m_UseKhrBufferDeviceAddress)
19426  {
19427  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
19428  return VK_ERROR_VALIDATION_FAILED_EXT;
19429  }
19430 
19431  VMA_DEBUG_LOG("vmaCreateBuffer");
19432 
19433  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19434 
19435  *pBuffer = VK_NULL_HANDLE;
19436  *pAllocation = VK_NULL_HANDLE;
19437 
19438  // 1. Create VkBuffer.
19439  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
19440  allocator->m_hDevice,
19441  pBufferCreateInfo,
19442  allocator->GetAllocationCallbacks(),
19443  pBuffer);
19444  if(res >= 0)
19445  {
19446  // 2. vkGetBufferMemoryRequirements.
19447  VkMemoryRequirements vkMemReq = {};
19448  bool requiresDedicatedAllocation = false;
19449  bool prefersDedicatedAllocation = false;
19450  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
19451  requiresDedicatedAllocation, prefersDedicatedAllocation);
19452 
19453  // 3. Allocate memory using allocator.
19454  res = allocator->AllocateMemory(
19455  vkMemReq,
19456  requiresDedicatedAllocation,
19457  prefersDedicatedAllocation,
19458  *pBuffer, // dedicatedBuffer
19459  pBufferCreateInfo->usage, // dedicatedBufferUsage
19460  VK_NULL_HANDLE, // dedicatedImage
19461  *pAllocationCreateInfo,
19462  VMA_SUBALLOCATION_TYPE_BUFFER,
19463  1, // allocationCount
19464  pAllocation);
19465 
19466 #if VMA_RECORDING_ENABLED
19467  if(allocator->GetRecorder() != VMA_NULL)
19468  {
19469  allocator->GetRecorder()->RecordCreateBuffer(
19470  allocator->GetCurrentFrameIndex(),
19471  *pBufferCreateInfo,
19472  *pAllocationCreateInfo,
19473  *pAllocation);
19474  }
19475 #endif
19476 
19477  if(res >= 0)
19478  {
19479  // 4. Bind buffer with memory.
19480  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19481  {
19482  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
19483  }
19484  if(res >= 0)
19485  {
19486  // All steps succeeded.
19487  #if VMA_STATS_STRING_ENABLED
19488  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19489  #endif
19490  if(pAllocationInfo != VMA_NULL)
19491  {
19492  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19493  }
19494 
19495  return VK_SUCCESS;
19496  }
19497  allocator->FreeMemory(
19498  1, // allocationCount
19499  pAllocation);
19500  *pAllocation = VK_NULL_HANDLE;
19501  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19502  *pBuffer = VK_NULL_HANDLE;
19503  return res;
19504  }
19505  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19506  *pBuffer = VK_NULL_HANDLE;
19507  return res;
19508  }
19509  return res;
19510 }
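Usage sketch of the create/allocate/bind sequence above from the caller's side (assumed handle `allocator`, illustrative size and usage flags):

VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, NULL);
// ... use the buffer ...
vmaDestroyBuffer(allocator, buffer, allocation);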
19511 
19512 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
19513  VmaAllocator allocator,
19514  const VkBufferCreateInfo* pBufferCreateInfo,
19515  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19516  VkDeviceSize minAlignment,
19517  VkBuffer* pBuffer,
19518  VmaAllocation* pAllocation,
19519  VmaAllocationInfo* pAllocationInfo)
19520 {
19521  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
19522 
19523  if(pBufferCreateInfo->size == 0)
19524  {
19525  return VK_ERROR_VALIDATION_FAILED_EXT;
19526  }
19527  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
19528  !allocator->m_UseKhrBufferDeviceAddress)
19529  {
19530  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
19531  return VK_ERROR_VALIDATION_FAILED_EXT;
19532  }
19533 
19534  VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
19535 
19536  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19537 
19538  *pBuffer = VK_NULL_HANDLE;
19539  *pAllocation = VK_NULL_HANDLE;
19540 
19541  // 1. Create VkBuffer.
19542  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
19543  allocator->m_hDevice,
19544  pBufferCreateInfo,
19545  allocator->GetAllocationCallbacks(),
19546  pBuffer);
19547  if(res >= 0)
19548  {
19549  // 2. vkGetBufferMemoryRequirements.
19550  VkMemoryRequirements vkMemReq = {};
19551  bool requiresDedicatedAllocation = false;
19552  bool prefersDedicatedAllocation = false;
19553  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
19554  requiresDedicatedAllocation, prefersDedicatedAllocation);
19555 
19556  // 2a. Include minAlignment
19557  vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
19558 
19559  // 3. Allocate memory using allocator.
19560  res = allocator->AllocateMemory(
19561  vkMemReq,
19562  requiresDedicatedAllocation,
19563  prefersDedicatedAllocation,
19564  *pBuffer, // dedicatedBuffer
19565  pBufferCreateInfo->usage, // dedicatedBufferUsage
19566  VK_NULL_HANDLE, // dedicatedImage
19567  *pAllocationCreateInfo,
19568  VMA_SUBALLOCATION_TYPE_BUFFER,
19569  1, // allocationCount
19570  pAllocation);
19571 
19572 #if VMA_RECORDING_ENABLED
19573  if(allocator->GetRecorder() != VMA_NULL)
19574  {
19575  VMA_ASSERT(0 && "Not implemented.");
19576  }
19577 #endif
19578 
19579  if(res >= 0)
19580  {
19581  // 4. Bind buffer with memory.
19582  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19583  {
19584  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
19585  }
19586  if(res >= 0)
19587  {
19588  // All steps succeeded.
19589  #if VMA_STATS_STRING_ENABLED
19590  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19591  #endif
19592  if(pAllocationInfo != VMA_NULL)
19593  {
19594  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19595  }
19596 
19597  return VK_SUCCESS;
19598  }
19599  allocator->FreeMemory(
19600  1, // allocationCount
19601  pAllocation);
19602  *pAllocation = VK_NULL_HANDLE;
19603  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19604  *pBuffer = VK_NULL_HANDLE;
19605  return res;
19606  }
19607  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19608  *pBuffer = VK_NULL_HANDLE;
19609  return res;
19610  }
19611  return res;
19612 }
19613 
19614 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
19615  VmaAllocator allocator,
19616  VkBuffer buffer,
19617  VmaAllocation allocation)
19618 {
19619  VMA_ASSERT(allocator);
19620 
19621  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19622  {
19623  return;
19624  }
19625 
19626  VMA_DEBUG_LOG("vmaDestroyBuffer");
19627 
19628  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19629 
19630 #if VMA_RECORDING_ENABLED
19631  if(allocator->GetRecorder() != VMA_NULL)
19632  {
19633  allocator->GetRecorder()->RecordDestroyBuffer(
19634  allocator->GetCurrentFrameIndex(),
19635  allocation);
19636  }
19637 #endif
19638 
19639  if(buffer != VK_NULL_HANDLE)
19640  {
19641  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
19642  }
19643 
19644  if(allocation != VK_NULL_HANDLE)
19645  {
19646  allocator->FreeMemory(
19647  1, // allocationCount
19648  &allocation);
19649  }
19650 }
19651 
19652 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
19653  VmaAllocator allocator,
19654  const VkImageCreateInfo* pImageCreateInfo,
19655  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19656  VkImage* pImage,
19657  VmaAllocation* pAllocation,
19658  VmaAllocationInfo* pAllocationInfo)
19659 {
19660  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
19661 
19662  if(pImageCreateInfo->extent.width == 0 ||
19663  pImageCreateInfo->extent.height == 0 ||
19664  pImageCreateInfo->extent.depth == 0 ||
19665  pImageCreateInfo->mipLevels == 0 ||
19666  pImageCreateInfo->arrayLayers == 0)
19667  {
19668  return VK_ERROR_VALIDATION_FAILED_EXT;
19669  }
19670 
19671  VMA_DEBUG_LOG("vmaCreateImage");
19672 
19673  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19674 
19675  *pImage = VK_NULL_HANDLE;
19676  *pAllocation = VK_NULL_HANDLE;
19677 
19678  // 1. Create VkImage.
19679  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
19680  allocator->m_hDevice,
19681  pImageCreateInfo,
19682  allocator->GetAllocationCallbacks(),
19683  pImage);
19684  if(res >= 0)
19685  {
19686  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
19687  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
19688  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
19689 
19690  // 2. Allocate memory using allocator.
19691  VkMemoryRequirements vkMemReq = {};
19692  bool requiresDedicatedAllocation = false;
19693  bool prefersDedicatedAllocation = false;
19694  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
19695  requiresDedicatedAllocation, prefersDedicatedAllocation);
19696 
19697  res = allocator->AllocateMemory(
19698  vkMemReq,
19699  requiresDedicatedAllocation,
19700  prefersDedicatedAllocation,
19701  VK_NULL_HANDLE, // dedicatedBuffer
19702  UINT32_MAX, // dedicatedBufferUsage
19703  *pImage, // dedicatedImage
19704  *pAllocationCreateInfo,
19705  suballocType,
19706  1, // allocationCount
19707  pAllocation);
19708 
19709 #if VMA_RECORDING_ENABLED
19710  if(allocator->GetRecorder() != VMA_NULL)
19711  {
19712  allocator->GetRecorder()->RecordCreateImage(
19713  allocator->GetCurrentFrameIndex(),
19714  *pImageCreateInfo,
19715  *pAllocationCreateInfo,
19716  *pAllocation);
19717  }
19718 #endif
19719 
19720  if(res >= 0)
19721  {
19722  // 3. Bind image with memory.
19723  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19724  {
19725  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
19726  }
19727  if(res >= 0)
19728  {
19729  // All steps succeeded.
19730  #if VMA_STATS_STRING_ENABLED
19731  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
19732  #endif
19733  if(pAllocationInfo != VMA_NULL)
19734  {
19735  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19736  }
19737 
19738  return VK_SUCCESS;
19739  }
19740  allocator->FreeMemory(
19741  1, // allocationCount
19742  pAllocation);
19743  *pAllocation = VK_NULL_HANDLE;
19744  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19745  *pImage = VK_NULL_HANDLE;
19746  return res;
19747  }
19748  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19749  *pImage = VK_NULL_HANDLE;
19750  return res;
19751  }
19752  return res;
19753 }
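Usage sketch (assumed handle `allocator`, illustrative parameters): a sampled 2D image in device-local memory; cleanup mirrors creation via vmaDestroyImage().

VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent.width = 512;
imgCreateInfo.extent.height = 512;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage image;
VmaAllocation allocation;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &image, &allocation, NULL);
// ... use the image ...
vmaDestroyImage(allocator, image, allocation);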
19754 
19755 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
19756  VmaAllocator allocator,
19757  VkImage image,
19758  VmaAllocation allocation)
19759 {
19760  VMA_ASSERT(allocator);
19761 
19762  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19763  {
19764  return;
19765  }
19766 
19767  VMA_DEBUG_LOG("vmaDestroyImage");
19768 
19769  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19770 
19771 #if VMA_RECORDING_ENABLED
19772  if(allocator->GetRecorder() != VMA_NULL)
19773  {
19774  allocator->GetRecorder()->RecordDestroyImage(
19775  allocator->GetCurrentFrameIndex(),
19776  allocation);
19777  }
19778 #endif
19779 
19780  if(image != VK_NULL_HANDLE)
19781  {
19782  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
19783  }
19784  if(allocation != VK_NULL_HANDLE)
19785  {
19786  allocator->FreeMemory(
19787  1, // allocationCount
19788  &allocation);
19789  }
19790 }
19791 
19792 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:2900
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2926
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2932
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2918
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2939
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2913
float priority
A floating-point value between 0 and 1, indicating the priority of the allocation relative to other m...
Definition: vk_mem_alloc.h:2946
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2908
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2902
Represents single memory allocation.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:3267
VkDeviceSize offset
Offset in VkDeviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:3291
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:3311
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:3272
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:3302
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:3316
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:3281
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:2422
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2427
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2453
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2478
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2424
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null.
Definition: vk_mem_alloc.h:2484
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2436
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2496
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2433
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2491
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2430
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2505
const VkExternalMemoryHandleTypeFlagsKHR * pTypeExternalMemoryHandleTypes
Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
Definition: vk_mem_alloc.h:2516
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2439
Represents main object of this library initialized.
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:2532
VkDevice device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:2547
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2537
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2542
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2638
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2641
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2652
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2662
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2673
Represents Opaque object that represents started defragmentation process.
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3666
const VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3706
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3672
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3726
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3721
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3669
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3687
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3690
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3735
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3716
const VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3681
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3711
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3757
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3767
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3762
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3748
uint32_t moveCount
Definition: vk_mem_alloc.h:3749
VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3750
Definition: vk_mem_alloc.h:3738
VkDeviceMemory memory
Definition: vk_mem_alloc.h:3740
VkDeviceSize offset
Definition: vk_mem_alloc.h:3741
VmaAllocation allocation
Definition: vk_mem_alloc.h:3739
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3771
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3779
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3773
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3775
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3777
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:2231
void * pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:2237
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:2233
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:2235
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:3068
float priority
A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relat...
Definition: vk_mem_alloc.h:3116
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:3071
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:3074
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:3110
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:3083
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:3088
VkDeviceSize minAllocationAlignment
Additional minimum alignment to be used for all allocations created from this pool....
Definition: vk_mem_alloc.h:3123
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:3096
void * pMemoryAllocateNext
Additional pNext chain to be attached to VkMemoryAllocateInfo used for every allocation made by this ...
Definition: vk_mem_alloc.h:3133
Represents custom memory pool.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:3138
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:3141
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:3160
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:3157
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:3147
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:3144
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:3150
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:2407
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2417
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2409
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2599
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2610
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2610
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2609
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2611
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2603
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2611
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2607
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2601
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2610
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2605
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2611
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2616
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2618
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2617
VmaStatInfo total
Definition: vk_mem_alloc.h:2619
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2361
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2371
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2376
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:2364
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2368
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2373
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2365
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2372
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2369
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2363
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:2362
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2375
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2377
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2370
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:2366
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:2367
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:2378
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2374
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:2217
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:2029
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
struct VmaStats VmaStats
General statistics from current state of Allocator.
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:3064
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:2393
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2401
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:2399
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:2241
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2316
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:2246
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:2298
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Definition: vk_mem_alloc.h:2334
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:2286
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:2271
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2353
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
Definition: vk_mem_alloc.h:2351
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2897
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
void vmaFreeMemory(VmaAllocator allocator, const VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3656
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3657
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3658
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount frames back from now.
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:2210
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, const VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current frame.
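A sketch of reading back placement information, e.g. for interop with code that works with raw VkDeviceMemory:

    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocation, &allocInfo);
    // allocInfo.deviceMemory and allocInfo.offset locate the allocation inside
    // its memory block; allocInfo.pMappedData is non-null for mapped allocations.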
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
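A condensed begin/end sequence for CPU-side defragmentation; allocations, allocCount, and allocationsChanged are assumed application-side arrays, and buffers bound to moved allocations must be destroyed, recreated, and rebound afterwards:

    VmaDefragmentationInfo2 defragInfo = {0};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.pAllocationsChanged = allocationsChanged; // optional VkBool32 array
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    // For GPU-side moves, also fill defragInfo.commandBuffer and submit it here.
    vmaDefragmentationEnd(allocator, defragCtx);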
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3660
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:3008
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3043
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3062
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3054
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool, so Buffer-Image Granularity can be ignored.
Definition: vk_mem_alloc.h:3026
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:3058
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkResult vmaDefragment(VmaAllocator allocator, const VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
VkResult vmaCreateBufferWithAlignment(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkDeviceSize minAlignment, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Creates a buffer with additional minimum alignment.
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
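For example, dumping the JSON to a file (sketch):

    #include <stdio.h>

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map

    FILE* file = fopen("vma_stats.json", "w");
    if (file != NULL)
    {
        fputs(statsString, file);
        fclose(file);
    }
    vmaFreeStatsString(allocator, statsString); // must be released with this call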
VmaMemoryUsage
Definition: vk_mem_alloc.h:2721
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2784
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2752
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2774
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2768
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2782
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2759
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2742
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2725
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VkResult vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
VkResult vmaInvalidateAllocations(VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation *allocations, const VkDeviceSize *offsets, const VkDeviceSize *sizes)
Invalidates memory of given set of allocations.
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
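For resources created outside of vmaCreateBuffer()/vmaCreateImage(), the usual allocate-then-bind pattern looks roughly like this (buffer is an already-created VkBuffer; device is the VkDevice):

    VkMemoryRequirements memReq;
    vkGetBufferMemoryRequirements(device, buffer, &memReq);

    VmaAllocationCreateInfo allocCreateInfo = {0};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VmaAllocation allocation;
    VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo,
        &allocation, NULL);
    if (res == VK_SUCCESS)
        res = vmaBindBufferMemory(allocator, allocation, buffer);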
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
VkResult vmaFlushAllocations(VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation *allocations, const VkDeviceSize *offsets, const VkDeviceSize *sizes)
Flushes memory of given set of allocations.
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
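This is the most common entry point: it creates the buffer, allocates memory according to pAllocationCreateInfo, and binds them together. A minimal sketch for a device-local vertex buffer:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
                          VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {0};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // prefer device-local memory

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, NULL);
    // Destroy both together with vmaDestroyBuffer(allocator, buffer, allocation).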
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2355
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
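A typical map/write/unmap round trip (srcData and dataSize are assumed; the allocation must be in HOST_VISIBLE memory):

    #include <string.h> // memcpy

    void* pData;
    VkResult res = vmaMapMemory(allocator, allocation, &pData);
    if (res == VK_SUCCESS)
    {
        memcpy(pData, srcData, (size_t)dataSize);
        vmaUnmapMemory(allocator, allocation);
        // If the memory type is not HOST_COHERENT, follow with
        // vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE).
    }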
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2788
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2883
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it (see the sketch after this flag list).
Definition: vk_mem_alloc.h:2819
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2856
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2876
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2795
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2850
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2832
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2886
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2839
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2865
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:2806
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2880
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2890
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2845
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2860
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2869
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2895
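As one common combination of the flags above, a persistently mapped staging buffer can be created roughly like this (bufCreateInfo as in the vmaCreateBuffer() sketch):

    VmaAllocationCreateInfo allocCreateInfo = {0};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer stagingBuf;
    VmaAllocation stagingAlloc;
    VmaAllocationInfo stagingAllocInfo;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &stagingBuf, &stagingAlloc, &stagingAllocInfo);

    // The allocation stays mapped for its lifetime: write through
    // stagingAllocInfo.pMappedData without calling vmaMapMemory().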
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
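This is useful for sizing custom pools before any real buffer exists. A sketch:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 1024; // a representative size
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {0};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
    // memTypeIndex can then be fed into VmaPoolCreateInfo::memoryTypeIndex.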
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
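pBudget must point to an array with one element per memory heap; a sketch that sizes it with VK_MAX_MEMORY_HEAPS and reads the heap count from the allocator:

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);

    const VkPhysicalDeviceMemoryProperties* memProps = NULL;
    vmaGetMemoryProperties(allocator, &memProps);
    for (uint32_t i = 0; i < memProps->memoryHeapCount; ++i)
    {
        // budgets[i].usage and budgets[i].budget are estimates in bytes.
    }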
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in the entire allocator.
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2403
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
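For example, attaching a debug name (the string is copied only if the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; otherwise pUserData is stored as an opaque pointer):

    vmaSetAllocationUserData(allocator, allocation, (void*)"Mesh_Vertices");

    // The value is readable back through vmaGetAllocationInfo():
    VmaAllocationInfo allocInfo;
    vmaGetAllocationInfo(allocator, allocation, &allocInfo);
    const char* name = (const char*)allocInfo.pUserData;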
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about existing VmaAllocator object - handle to Vulkan device etc.