//
// Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
    #define NOMINMAX // For windows.h
#endif

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
#if !defined(VMA_MEMORY_PRIORITY)
    #if VK_EXT_memory_priority
        #define VMA_MEMORY_PRIORITY 1
    #else
        #define VMA_MEMORY_PRIORITY 0
    #endif
#endif

// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
#if !defined(VMA_EXTERNAL_MEMORY)
    #if VK_KHR_external_memory
        #define VMA_EXTERNAL_MEMORY 1
    #else
        #define VMA_EXTERNAL_MEMORY 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to, if they are not null.
//
// The length may be one of:
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
//   means the number of memory heaps available in the device associated
//   with the VmaAllocator being dealt with.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers, then we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

// Represents the main object of this library initialized: the Vulkan memory allocator.
VK_DEFINE_HANDLE(VmaAllocator)

// Callback function called after successful vkAllocateMemory.
typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);
// Callback function called before vkFreeMemory.
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);

// Set of informative callbacks for vkAllocateMemory and vkFreeMemory. Optional.
// The struct opening lines were lost in extraction; reconstructed from the library's standard definition.
typedef struct VmaDeviceMemoryCallbacks {
    // Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    // Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    // Optional, can be null.
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;

// Flags for created VmaAllocator. The enum was lost in extraction;
// reconstructed to match the library's standard definition.
typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
    VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
    VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

// Pointers to some Vulkan functions - a subset used by the library.
// Used in VmaAllocatorCreateInfo::pVulkanFunctions.
typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;

// Flags to be used in VmaRecordSettings::flags.
typedef enum VmaRecordFlagBits {
    // Enables flush after recording every function call, so the file contains
    // valid data even after a crash. Can degrade performance.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
typedef struct VmaRecordSettings
{
    // Flags for recording. Use VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    // Path to the file that should be written by the recording.
    const char* VMA_NOT_NULL pFilePath;
} VmaRecordSettings;

// Description of an Allocator to be created.
// Members omitted by the extraction are reconstructed from the library's standard definition.
typedef struct VmaAllocatorCreateInfo
{
    // Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    // Vulkan physical device. Must remain valid for the whole lifetime of the allocator.
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    // Vulkan device. Must remain valid for the whole lifetime of the allocator.
    VkDevice VMA_NOT_NULL device;
    // Preferred size of a single VkDeviceMemory block allocated from large heaps > 1 GiB. Optional.
    VkDeviceSize preferredLargeHeapBlockSize;
    // Custom CPU memory allocation callbacks. Optional.
    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
    // Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
    // Maximum number of additional frames that are in use at the same time as the current frame.
    uint32_t frameInUseCount;
    // Either null or a pointer to an array of limits on the maximum number of bytes allocated out of each Vulkan memory heap.
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
    // Pointers to Vulkan functions. Can be null.
    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
    // Parameters for recording of VMA calls. Can be null.
    const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
    // Handle to Vulkan instance object.
    VkInstance VMA_NOT_NULL instance;
    // Optional. The highest version of Vulkan that the application is designed to use.
    uint32_t vulkanApiVersion;
#if VMA_EXTERNAL_MEMORY
    // Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
    const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
#endif // #if VMA_EXTERNAL_MEMORY
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator VMA_NULLABLE allocator);

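/*
Usage sketch (illustrative, based on the library documentation; the Vulkan
handles instance, physicalDevice and device are assumed to exist):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2;
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator for the lifetime of the application ...
    vmaDestroyAllocator(allocator);
*/
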
// Information about the existing VmaAllocator object.
typedef struct VmaAllocatorInfo
{
    // Handle to the Vulkan instance that the allocator was created with.
    VkInstance VMA_NOT_NULL instance;
    // Handle to the Vulkan physical device that the allocator was created with.
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    // Handle to the Vulkan device that the allocator was created with.
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t frameIndex);

// Calculated statistics of memory usage in entire allocator.
// Members omitted by the extraction are reconstructed from the library's standard definition.
typedef struct VmaStatInfo
{
    // Number of VkDeviceMemory Vulkan memory blocks allocated.
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    // Total number of bytes occupied by all allocations.
    VkDeviceSize usedBytes;
    // Total number of bytes occupied by unused ranges.
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

// General statistics from the current state of the Allocator.
typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaStats* VMA_NOT_NULL pStats);

// Statistics of current memory usage and available budget, in bytes, for a specific memory heap.
typedef struct VmaBudget
{
    // Sum size of all VkDeviceMemory blocks allocated from the particular heap, in bytes.
    VkDeviceSize blockBytes;

    // Sum size of all allocations created in the particular heap, in bytes.
    VkDeviceSize allocationBytes;

    // Estimated current memory usage of the program, in bytes.
    VkDeviceSize usage;

    // Estimated amount of memory available to the program, in bytes.
    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaBudget* VMA_NOT_NULL pBudget);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

// Builds and returns statistics as a null-terminated string in JSON format.
VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

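/*
Usage sketch (illustrative): dump the whole internal state as a JSON string,
then release it with the matching free function.

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
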
// Represents custom memory pool.
VK_DEFINE_HANDLE(VmaPool)

// Intended usage of memory. Values omitted by the extraction are reconstructed
// from the library's standard definition.
typedef enum VmaMemoryUsage
{
    // No intended memory usage specified. Use other members of VmaAllocationCreateInfo to specify your requirements.
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    // Memory will be used on device only, so fast access from the device is preferred.
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    // Memory will be mappable on host. Guaranteed to be HOST_VISIBLE and HOST_COHERENT.
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    // Memory that is both mappable on host and preferably fast to access by GPU.
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    // Memory mappable on host and cached. Preferably used for reading data back from GPU.
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    // CPU memory - preferably not DEVICE_LOCAL, but also not guaranteed to be HOST_VISIBLE.
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    // Lazily allocated GPU memory, for memory types with VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT.
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

// Flags to be passed as VmaAllocationCreateInfo::flags. The enum was lost in
// extraction; reconstructed to match the library's standard definition.
typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT | VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT | VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    // Use VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    // Intended usage of memory. Can be VMA_MEMORY_USAGE_UNKNOWN if the flags below specify the requirements instead.
    VmaMemoryUsage usage;
    // Flags that must be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags requiredFlags;
    // Flags that preferably should be set in a memory type chosen for an allocation.
    VkMemoryPropertyFlags preferredFlags;
    // Bitmask containing one bit set for every memory type acceptable for this allocation.
    uint32_t memoryTypeBits;
    // Pool that this allocation should be created in. Optional; leave null to allocate from default pools.
    VmaPool VMA_NULLABLE pool;
    // Custom general-purpose pointer that will be stored in VmaAllocation.
    void* VMA_NULLABLE pUserData;
    // Allocation priority in [0, 1]. Used only together with VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT.
    float priority;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

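/*
Usage sketch (illustrative): find a host-visible memory type suitable for a
staging buffer. Passing UINT32_MAX as memoryTypeBits means every memory type
is acceptable as far as Vulkan requirements are concerned.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
    // memTypeIndex can now be used e.g. as VmaPoolCreateInfo::memoryTypeIndex.
*/
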

// Flags to be passed as VmaPoolCreateInfo::flags. The enum was lost in
// extraction; reconstructed to match the library's standard definition.
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
    VMA_POOL_CREATE_ALGORITHM_MASK = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT | VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

// Describes parameter of new VmaPool.
// Members omitted by the extraction are reconstructed from the library's standard definition.
typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    // Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes. Optional.
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
    // Pool priority in [0, 1]. Used only together with VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT.
    float priority;
    VkDeviceSize minAllocationAlignment;
    void* VMA_NULLABLE pMemoryAllocateNext;
} VmaPoolCreateInfo;

// Describes parameter of existing VmaPool.
typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NULLABLE pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    VmaPoolStats* VMA_NOT_NULL pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    size_t* VMA_NULLABLE pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE * VMA_NOT_NULL ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE pName);

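/*
Usage sketch (illustrative): create a custom pool with a bounded number of
fixed-size blocks in a previously chosen memory type.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex()
    poolCreateInfo.blockSize = 128ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocate from the pool by setting VmaAllocationCreateInfo::pool = pool.
    vmaDestroyPool(allocator, pool);
*/
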
// Represents single memory allocation.
VK_DEFINE_HANDLE(VmaAllocation)


// Parameters of a VmaAllocation object, retrievable using vmaGetAllocationInfo().
typedef struct VmaAllocationInfo {
    // Memory type index that this allocation was allocated from.
    uint32_t memoryType;
    // Handle to Vulkan memory object.
    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
    // Offset in the VkDeviceMemory object to the beginning of this allocation, in bytes.
    VkDeviceSize offset;
    // Size of this allocation, in bytes.
    VkDeviceSize size;
    // Pointer to the beginning of this allocation as mapped data. Null if not mapped.
    void* VMA_NULLABLE pMappedData;
    // Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
    void* VMA_NULLABLE pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
    size_t allocationCount,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    size_t allocationCount,
    const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE * VMA_NOT_NULL ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

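/*
Usage sketch (illustrative): upload data through a temporarily mapped pointer.
Every vmaMapMemory() must be balanced by vmaUnmapMemory(); the variables
allocation and myData are assumed to exist.

    void* mappedData;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, &myData, sizeof(myData));
        vmaUnmapMemory(allocator, allocation);
    }
*/
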
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

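/*
Usage sketch (illustrative): after a CPU write to an allocation in a memory
type that is HOST_VISIBLE but not HOST_COHERENT, flush the written range.
Passing VK_WHOLE_SIZE as size flushes up to the end of the allocation;
a persistently mapped allocation and its allocInfo are assumed to exist.

    memcpy(allocInfo.pMappedData, &myData, sizeof(myData));
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
*/
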
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);

// Represents a started defragmentation process.
VK_DEFINE_HANDLE(VmaDefragmentationContext)

// Flags to be used in vmaDefragmentationBegin().
typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

// Parameters for defragmentation. To be used with function vmaDefragmentationBegin().
// Members omitted by the extraction are reconstructed from the library's standard definition.
typedef struct VmaDefragmentationInfo2 {
    // Reserved for future use. Should be 0.
    VmaDefragmentationFlags flags;
    // Number of allocations in pAllocations array.
    uint32_t allocationCount;
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
    uint32_t poolCount;
    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
    // Maximum bytes / number of allocations that can be moved using transfers on the CPU side.
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    // Maximum bytes / number of allocations that can be moved using transfers on the GPU side, posted to commandBuffer.
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    // Optional. Command buffer where GPU copy commands will be posted.
    VkCommandBuffer VMA_NULLABLE commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation VMA_NOT_NULL allocation;
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

// Parameters for incremental defragmentation steps. To be used with function vmaBeginDefragmentationPass().
typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
} VmaDefragmentationPassInfo;

// Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

// Statistics returned by function vmaDefragment().
typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
    VmaDefragmentationStats* VMA_NULLABLE pStats,
    VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context,
    VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context
);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    size_t allocationCount,
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
    const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
    VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);

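/*
Usage sketch (illustrative, CPU-side defragmentation only; the array
allocations and its count allocCount are assumed to exist):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers or images bound to the moved allocations must afterwards be
    // recreated and rebound, e.g. using vmaBindBufferMemory().
*/
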
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

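/*
Usage sketch (the canonical pattern from the library documentation): create a
buffer together with its backing memory in one call.

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
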
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkDeviceSize minAlignment,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
    VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
    VmaAllocation VMA_NULLABLE allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

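/*
Usage note (from the library documentation): in exactly one translation unit,
define VMA_IMPLEMENTATION before including this header to compile the
implementation below.

    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/
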
// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <utility>

#if VMA_RECORDING_ENABLED
    #include <chrono>
    #if defined(_WIN32)
        #include <windows.h>
    #else
        #include <sstream>
        #include <thread>
    #endif
#endif

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    #define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
*/
#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
    #if defined(VK_NO_PROTOTYPES)
        extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
        extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    #endif
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation of
the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though std::shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
The library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif

static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // Unfortunately, aligned_alloc causes VMA to crash because it returns null pointers (at least under macOS 11.4).
    // Therefore, for now this specific path is disabled until a proper solution is found.
    //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
    //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
    //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    //    // MAC_OS_X_VERSION_10_16), even though the function is marked
    //    // available for 10.15. That's why the preprocessor checks for 10.16 but
    //    // the __builtin_available checks for 10.15.
    //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
    //    if (__builtin_available(macOS 10.15, iOS 13, *))
    //        return aligned_alloc(alignment, size);
    //#endif
    //#endif

    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#elif defined(_WIN32)
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return _aligned_malloc(size, alignment);
}
#else
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return aligned_alloc(alignment, size);
}
#endif

#if defined(_WIN32)
static void vma_aligned_free(void* ptr)
{
    _aligned_free(ptr);
}
#else
static void vma_aligned_free(void* VMA_NULLABLE ptr)
{
    free(ptr);
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures like operator[].
// Making it non-empty can noticeably slow down the program.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
#endif

#ifndef VMA_SYSTEM_ALIGNED_FREE
    // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
    #if defined(VMA_SYSTEM_FREE)
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
    #else
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    // Define to 1 to make every allocation use its own VkDeviceMemory block. For debugging purposes only.
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_MIN_ALIGNMENT
    // Minimum alignment of all allocations, in bytes.
    #ifdef VMA_DEBUG_ALIGNMENT // Old name
        #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
    #else
        #define VMA_MIN_ALIGNMENT (1)
    #endif
#endif

#ifndef VMA_DEBUG_MARGIN
    // Minimum margin before and after every allocation, in bytes. Set nonzero for debugging purposes only.
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    // Define to 1 to fill newly created and freed allocations with a bit pattern. For debugging purposes only.
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    // Define to 1, together with nonzero VMA_DEBUG_MARGIN, to write magic values into the margins and validate them on free.
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    // Set to 1 to enable a single mutex protecting all entry calls to the library. For debugging purposes only.
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    // Minimum value of VkPhysicalDeviceLimits::bufferImageGranularity. Set above 1 for debugging purposes only.
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
    /*
    Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
    and return an error instead of leaving it up to the Vulkan implementation what to do in such cases.
    */
    #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    // Maximum size of a Vulkan memory heap to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    // Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// Copy of some Vulkan definitions so we don't need to check for their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
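// A quick sanity check of the parallel bit counting above (illustrative):
// VmaCountBitsSet(0x0000000B) == 3, because 0xB is binary 1011.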

/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer whose value is always nonnegative.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Aligns the given value up to the nearest multiple of the align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return (val + alignment - 1) & ~(alignment - 1);
}
// Aligns the given value down to the nearest multiple of the align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return val & ~(alignment - 1);
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater or
equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

Returned value is the found element, if present in the collection, or the place where
a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}
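/*
Usage sketch (illustrative, assumes the range is sorted ascending):

    VkDeviceSize arr[] = { 16, 32, 64, 256 };
    struct Less { bool operator()(VkDeviceSize a, VkDeviceSize b) const { return a < b; } };
    // Returns a pointer to 64: the first element that is not less than 48.
    const VkDeviceSize* it = VmaBinaryFindFirstNotLess(arr, arr + 4, VkDeviceSize(48), Less());
*/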

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}
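/*
Usage sketch (illustrative, assumes Vulkan 1.1+ headers for
VkMemoryAllocateFlagsInfo): push an extension struct onto a pNext chain.

    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
    VkMemoryAllocateFlagsInfo flagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO };
    VmaPnextChainPushFront(&allocInfo, &flagsInfo);
    // Now allocInfo.pNext == &flagsInfo.
*/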

// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    void* result = VMA_NULL;
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        result = (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
    return result;
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_ALIGNED_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

4997 // STL-compatible allocator.
4998 template<typename T>
4999 class VmaStlAllocator
5000 {
5001 public:
5002  const VkAllocationCallbacks* const m_pCallbacks;
5003  typedef T value_type;
5004 
5005  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
5006  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
5007 
5008  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
5009  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
5010 
5011  template<typename U>
5012  bool operator==(const VmaStlAllocator<U>& rhs) const
5013  {
5014  return m_pCallbacks == rhs.m_pCallbacks;
5015  }
5016  template<typename U>
5017  bool operator!=(const VmaStlAllocator<U>& rhs) const
5018  {
5019  return m_pCallbacks != rhs.m_pCallbacks;
5020  }
5021 
5022  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
5023  VmaStlAllocator(const VmaStlAllocator&) = default;
5024 };
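// Example (a sketch; assumes <vector> has been included): VmaStlAllocator
// models the minimal C++11 allocator interface, so it can back std::vector -
// the same pairing used below when VMA_USE_STL_VECTOR is enabled.
static void VmaExampleStlAllocator(const VkAllocationCallbacks* pCallbacks)
{
    std::vector<int, VmaStlAllocator<int> > v{ VmaStlAllocator<int>(pCallbacks) };
    v.push_back(7); // Element storage is obtained through pCallbacks (or aligned malloc).
}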
5025 
5026 #if VMA_USE_STL_VECTOR
5027 
5028 #define VmaVector std::vector
5029 
5030 template<typename T, typename allocatorT>
5031 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
5032 {
5033  vec.insert(vec.begin() + index, item);
5034 }
5035 
5036 template<typename T, typename allocatorT>
5037 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
5038 {
5039  vec.erase(vec.begin() + index);
5040 }
5041 
5042 #else // #if VMA_USE_STL_VECTOR
5043 
5044 /* Class with interface compatible with subset of std::vector.
5045 T must be POD because constructors and destructors are not called and memcpy is
5046 used for these objects. */
5047 template<typename T, typename AllocatorT>
5048 class VmaVector
5049 {
5050 public:
5051  typedef T value_type;
5052 
5053  VmaVector(const AllocatorT& allocator) :
5054  m_Allocator(allocator),
5055  m_pArray(VMA_NULL),
5056  m_Count(0),
5057  m_Capacity(0)
5058  {
5059  }
5060 
5061  VmaVector(size_t count, const AllocatorT& allocator) :
5062  m_Allocator(allocator),
5063  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
5064  m_Count(count),
5065  m_Capacity(count)
5066  {
5067  }
5068 
5069  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
5070  // value is unused.
5071  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
5072  : VmaVector(count, allocator) {}
5073 
5074  VmaVector(const VmaVector<T, AllocatorT>& src) :
5075  m_Allocator(src.m_Allocator),
5076  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
5077  m_Count(src.m_Count),
5078  m_Capacity(src.m_Count)
5079  {
5080  if(m_Count != 0)
5081  {
5082  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
5083  }
5084  }
5085 
5086  ~VmaVector()
5087  {
5088  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5089  }
5090 
5091  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
5092  {
5093  if(&rhs != this)
5094  {
5095  resize(rhs.m_Count);
5096  if(m_Count != 0)
5097  {
5098  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
5099  }
5100  }
5101  return *this;
5102  }
5103 
5104  bool empty() const { return m_Count == 0; }
5105  size_t size() const { return m_Count; }
5106  T* data() { return m_pArray; }
5107  const T* data() const { return m_pArray; }
5108 
5109  T& operator[](size_t index)
5110  {
5111  VMA_HEAVY_ASSERT(index < m_Count);
5112  return m_pArray[index];
5113  }
5114  const T& operator[](size_t index) const
5115  {
5116  VMA_HEAVY_ASSERT(index < m_Count);
5117  return m_pArray[index];
5118  }
5119 
5120  T& front()
5121  {
5122  VMA_HEAVY_ASSERT(m_Count > 0);
5123  return m_pArray[0];
5124  }
5125  const T& front() const
5126  {
5127  VMA_HEAVY_ASSERT(m_Count > 0);
5128  return m_pArray[0];
5129  }
5130  T& back()
5131  {
5132  VMA_HEAVY_ASSERT(m_Count > 0);
5133  return m_pArray[m_Count - 1];
5134  }
5135  const T& back() const
5136  {
5137  VMA_HEAVY_ASSERT(m_Count > 0);
5138  return m_pArray[m_Count - 1];
5139  }
5140 
5141  void reserve(size_t newCapacity, bool freeMemory = false)
5142  {
5143  newCapacity = VMA_MAX(newCapacity, m_Count);
5144 
5145  if((newCapacity < m_Capacity) && !freeMemory)
5146  {
5147  newCapacity = m_Capacity;
5148  }
5149 
5150  if(newCapacity != m_Capacity)
5151  {
5152  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5153  if(m_Count != 0)
5154  {
5155  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5156  }
5157  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5158  m_Capacity = newCapacity;
5159  m_pArray = newArray;
5160  }
5161  }
5162 
5163  void resize(size_t newCount)
5164  {
5165  size_t newCapacity = m_Capacity;
5166  if(newCount > m_Capacity)
5167  {
5168  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
5169  }
5170 
5171  if(newCapacity != m_Capacity)
5172  {
5173  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5174  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
5175  if(elementsToCopy != 0)
5176  {
5177  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
5178  }
5179  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5180  m_Capacity = newCapacity;
5181  m_pArray = newArray;
5182  }
5183 
5184  m_Count = newCount;
5185  }
5186 
5187  void clear()
5188  {
5189  resize(0);
5190  }
5191 
5192  void shrink_to_fit()
5193  {
5194  if(m_Capacity > m_Count)
5195  {
5196  T* newArray = VMA_NULL;
5197  if(m_Count > 0)
5198  {
5199  newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
5200  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5201  }
5202  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5203  m_Capacity = m_Count;
5204  m_pArray = newArray;
5205  }
5206  }
5207 
5208  void insert(size_t index, const T& src)
5209  {
5210  VMA_HEAVY_ASSERT(index <= m_Count);
5211  const size_t oldCount = size();
5212  resize(oldCount + 1);
5213  if(index < oldCount)
5214  {
5215  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
5216  }
5217  m_pArray[index] = src;
5218  }
5219 
5220  void remove(size_t index)
5221  {
5222  VMA_HEAVY_ASSERT(index < m_Count);
5223  const size_t oldCount = size();
5224  if(index < oldCount - 1)
5225  {
5226  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
5227  }
5228  resize(oldCount - 1);
5229  }
5230 
5231  void push_back(const T& src)
5232  {
5233  const size_t newIndex = size();
5234  resize(newIndex + 1);
5235  m_pArray[newIndex] = src;
5236  }
5237 
5238  void pop_back()
5239  {
5240  VMA_HEAVY_ASSERT(m_Count > 0);
5241  resize(size() - 1);
5242  }
5243 
5244  void push_front(const T& src)
5245  {
5246  insert(0, src);
5247  }
5248 
5249  void pop_front()
5250  {
5251  VMA_HEAVY_ASSERT(m_Count > 0);
5252  remove(0);
5253  }
5254 
5255  typedef T* iterator;
5256  typedef const T* const_iterator;
5257 
5258  iterator begin() { return m_pArray; }
5259  iterator end() { return m_pArray + m_Count; }
5260  const_iterator cbegin() const { return m_pArray; }
5261  const_iterator cend() const { return m_pArray + m_Count; }
5262  const_iterator begin() const { return cbegin(); }
5263  const_iterator end() const { return cend(); }
5264 
5265 private:
5266  AllocatorT m_Allocator;
5267  T* m_pArray;
5268  size_t m_Count;
5269  size_t m_Capacity;
5270 };
5271 
5272 template<typename T, typename allocatorT>
5273 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
5274 {
5275  vec.insert(index, item);
5276 }
5277 
5278 template<typename T, typename allocatorT>
5279 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
5280 {
5281  vec.remove(index);
5282 }
5283 
5284 #endif // #if VMA_USE_STL_VECTOR
5285 
5286 template<typename CmpLess, typename VectorT>
5287 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
5288 {
5289  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5290  vector.data(),
5291  vector.data() + vector.size(),
5292  value,
5293  CmpLess()) - vector.data();
5294  VmaVectorInsert(vector, indexToInsert, value);
5295  return indexToInsert;
5296 }
5297 
5298 template<typename CmpLess, typename VectorT>
5299 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
5300 {
5301  CmpLess comparator;
5302  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
5303  vector.begin(),
5304  vector.end(),
5305  value,
5306  comparator);
5307  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5308  {
5309  size_t indexToRemove = it - vector.begin();
5310  VmaVectorRemove(vector, indexToRemove);
5311  return true;
5312  }
5313  return false;
5314 }
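// Example (hypothetical comparator and function): VmaBinaryFindFirstNotLess,
// defined earlier in this file, is a lower_bound-style search, so insertions
// keep the vector sorted and removals only erase exact matches.
struct VmaExampleUintLess
{
    bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; }
};

static void VmaExampleSortedVector(const VkAllocationCallbacks* pCallbacks)
{
    VmaVector<uint32_t, VmaStlAllocator<uint32_t> > vec{ VmaStlAllocator<uint32_t>(pCallbacks) };
    VmaVectorInsertSorted<VmaExampleUintLess>(vec, 30u);
    VmaVectorInsertSorted<VmaExampleUintLess>(vec, 10u);
    VmaVectorInsertSorted<VmaExampleUintLess>(vec, 20u); // vec == {10, 20, 30}
    VmaVectorRemoveSorted<VmaExampleUintLess>(vec, 20u); // returns true; vec == {10, 30}
}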
5315 
5316 ////////////////////////////////////////////////////////////////////////////////
5317 // class VmaSmallVector
5318 
5319 /*
5320 This is a vector (a variable-sized array), optimized for the case when the array is small.
5321 
5322 It stores up to N elements in-place, which avoids heap allocation as long as
5323 the element count stays at or below that threshold. This keeps the common
5324 "small" case fast without losing generality for large inputs.
5325 */
5326 
5327 template<typename T, typename AllocatorT, size_t N>
5328 class VmaSmallVector
5329 {
5330 public:
5331  typedef T value_type;
5332 
5333  VmaSmallVector(const AllocatorT& allocator) :
5334  m_Count(0),
5335  m_DynamicArray(allocator)
5336  {
5337  }
5338  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5339  m_Count(count),
5340  m_DynamicArray(count > N ? count : 0, allocator)
5341  {
5342  }
5343  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5344  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5345  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5346  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5347 
5348  bool empty() const { return m_Count == 0; }
5349  size_t size() const { return m_Count; }
5350  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5351  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5352 
5353  T& operator[](size_t index)
5354  {
5355  VMA_HEAVY_ASSERT(index < m_Count);
5356  return data()[index];
5357  }
5358  const T& operator[](size_t index) const
5359  {
5360  VMA_HEAVY_ASSERT(index < m_Count);
5361  return data()[index];
5362  }
5363 
5364  T& front()
5365  {
5366  VMA_HEAVY_ASSERT(m_Count > 0);
5367  return data()[0];
5368  }
5369  const T& front() const
5370  {
5371  VMA_HEAVY_ASSERT(m_Count > 0);
5372  return data()[0];
5373  }
5374  T& back()
5375  {
5376  VMA_HEAVY_ASSERT(m_Count > 0);
5377  return data()[m_Count - 1];
5378  }
5379  const T& back() const
5380  {
5381  VMA_HEAVY_ASSERT(m_Count > 0);
5382  return data()[m_Count - 1];
5383  }
5384 
5385  void resize(size_t newCount, bool freeMemory = false)
5386  {
5387  if(newCount > N && m_Count > N)
5388  {
5389  // Any direction, staying in m_DynamicArray
5390  m_DynamicArray.resize(newCount);
5391  if(freeMemory)
5392  {
5393  m_DynamicArray.shrink_to_fit();
5394  }
5395  }
5396  else if(newCount > N && m_Count <= N)
5397  {
5398  // Growing, moving from m_StaticArray to m_DynamicArray
5399  m_DynamicArray.resize(newCount);
5400  if(m_Count > 0)
5401  {
5402  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5403  }
5404  }
5405  else if(newCount <= N && m_Count > N)
5406  {
5407  // Shrinking, moving from m_DynamicArray to m_StaticArray
5408  if(newCount > 0)
5409  {
5410  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5411  }
5412  m_DynamicArray.resize(0);
5413  if(freeMemory)
5414  {
5415  m_DynamicArray.shrink_to_fit();
5416  }
5417  }
5418  else
5419  {
5420  // Any direction, staying in m_StaticArray - nothing to do here
5421  }
5422  m_Count = newCount;
5423  }
5424 
5425  void clear(bool freeMemory = false)
5426  {
5427  m_DynamicArray.clear();
5428  if(freeMemory)
5429  {
5430  m_DynamicArray.shrink_to_fit();
5431  }
5432  m_Count = 0;
5433  }
5434 
5435  void insert(size_t index, const T& src)
5436  {
5437  VMA_HEAVY_ASSERT(index <= m_Count);
5438  const size_t oldCount = size();
5439  resize(oldCount + 1);
5440  T* const dataPtr = data();
5441  if(index < oldCount)
5442  {
5443  // Suboptimal: when resize() has just moved the data from m_StaticArray to m_DynamicArray, this memmove could be a direct memcpy from m_StaticArray to m_DynamicArray.
5444  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5445  }
5446  dataPtr[index] = src;
5447  }
5448 
5449  void remove(size_t index)
5450  {
5451  VMA_HEAVY_ASSERT(index < m_Count);
5452  const size_t oldCount = size();
5453  if(index < oldCount - 1)
5454  {
5455  // Suboptimal: when the following resize() moves the data from m_DynamicArray to m_StaticArray, this memmove could be a direct memcpy from m_DynamicArray to m_StaticArray.
5456  T* const dataPtr = data();
5457  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5458  }
5459  resize(oldCount - 1);
5460  }
5461 
5462  void push_back(const T& src)
5463  {
5464  const size_t newIndex = size();
5465  resize(newIndex + 1);
5466  data()[newIndex] = src;
5467  }
5468 
5469  void pop_back()
5470  {
5471  VMA_HEAVY_ASSERT(m_Count > 0);
5472  resize(size() - 1);
5473  }
5474 
5475  void push_front(const T& src)
5476  {
5477  insert(0, src);
5478  }
5479 
5480  void pop_front()
5481  {
5482  VMA_HEAVY_ASSERT(m_Count > 0);
5483  remove(0);
5484  }
5485 
5486  typedef T* iterator;
5487 
5488  iterator begin() { return data(); }
5489  iterator end() { return data() + m_Count; }
5490 
5491 private:
5492  size_t m_Count;
5493  T m_StaticArray[N]; // Used when m_Count <= N.
5494  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N.
5495 };
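// Example (hypothetical function): with N == 4, the first four elements live in
// m_StaticArray with no heap allocation; the fifth push_back crosses the
// threshold and resize() copies the contents into m_DynamicArray.
static void VmaExampleSmallVector(const VkAllocationCallbacks* pCallbacks)
{
    VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 4> sv{ VmaStlAllocator<uint32_t>(pCallbacks) };
    for(uint32_t i = 0; i < 4; ++i)
        sv.push_back(i); // Still stored in-place.
    sv.push_back(4); // Now m_Count > N: the data migrates to the heap-backed vector.
}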
5496 
5497 ////////////////////////////////////////////////////////////////////////////////
5498 // class VmaPoolAllocator
5499 
5500 /*
5501 Allocator for objects of type T, using a list of arrays (pools) to speed up
5502 allocation. The number of elements that can be allocated is unbounded because
5503 the allocator can create multiple blocks.
5504 */
5505 template<typename T>
5506 class VmaPoolAllocator
5507 {
5508  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5509 public:
5510  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5511  ~VmaPoolAllocator();
5512  template<typename... Types> T* Alloc(Types... args);
5513  void Free(T* ptr);
5514 
5515 private:
5516  union Item
5517  {
5518  uint32_t NextFreeIndex;
5519  alignas(T) char Value[sizeof(T)];
5520  };
5521 
5522  struct ItemBlock
5523  {
5524  Item* pItems;
5525  uint32_t Capacity;
5526  uint32_t FirstFreeIndex;
5527  };
5528 
5529  const VkAllocationCallbacks* m_pAllocationCallbacks;
5530  const uint32_t m_FirstBlockCapacity;
5531  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5532 
5533  ItemBlock& CreateNewBlock();
5534 };
5535 
5536 template<typename T>
5537 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5538  m_pAllocationCallbacks(pAllocationCallbacks),
5539  m_FirstBlockCapacity(firstBlockCapacity),
5540  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5541 {
5542  VMA_ASSERT(m_FirstBlockCapacity > 1);
5543 }
5544 
5545 template<typename T>
5546 VmaPoolAllocator<T>::~VmaPoolAllocator()
5547 {
5548  for(size_t i = m_ItemBlocks.size(); i--; )
5549  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5550  m_ItemBlocks.clear();
5551 }
5552 
5553 template<typename T>
5554 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
5555 {
5556  for(size_t i = m_ItemBlocks.size(); i--; )
5557  {
5558  ItemBlock& block = m_ItemBlocks[i];
5559  // This block has some free items: use the first one.
5560  if(block.FirstFreeIndex != UINT32_MAX)
5561  {
5562  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5563  block.FirstFreeIndex = pItem->NextFreeIndex;
5564  T* result = (T*)&pItem->Value;
5565  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5566  return result;
5567  }
5568  }
5569 
5570  // No block has a free item: create a new one and use it.
5571  ItemBlock& newBlock = CreateNewBlock();
5572  Item* const pItem = &newBlock.pItems[0];
5573  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5574  T* result = (T*)&pItem->Value;
5575  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5576  return result;
5577 }
5578 
5579 template<typename T>
5580 void VmaPoolAllocator<T>::Free(T* ptr)
5581 {
5582  // Search all memory blocks to find ptr.
5583  for(size_t i = m_ItemBlocks.size(); i--; )
5584  {
5585  ItemBlock& block = m_ItemBlocks[i];
5586 
5587  // Cast ptr back to the enclosing union Item*. memcpy avoids type-punning/strict-aliasing issues.
5588  Item* pItemPtr;
5589  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5590 
5591  // Check if pItemPtr is in address range of this block.
5592  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5593  {
5594  ptr->~T(); // Explicit destructor call.
5595  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5596  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5597  block.FirstFreeIndex = index;
5598  return;
5599  }
5600  }
5601  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5602 }
5603 
5604 template<typename T>
5605 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5606 {
5607  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5608  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5609 
5610  const ItemBlock newBlock = {
5611  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5612  newBlockCapacity,
5613  0 };
5614 
5615  m_ItemBlocks.push_back(newBlock);
5616 
5617  // Set up the singly-linked list of all free items in this block.
5618  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5619  newBlock.pItems[i].NextFreeIndex = i + 1;
5620  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5621  return m_ItemBlocks.back();
5622 }
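// Example (hypothetical type and function): Alloc() pops an item off a block's
// free list, forwarding its arguments to the constructor; Free() destroys the
// item and pushes its slot back, so alloc/free cycles reuse memory instead of
// hitting the heap.
struct VmaExamplePooled
{
    int value;
    explicit VmaExamplePooled(int v) : value(v) { }
};

static void VmaExamplePoolAllocator(const VkAllocationCallbacks* pCallbacks)
{
    VmaPoolAllocator<VmaExamplePooled> pool(pCallbacks, 32); // First block holds 32 items.
    VmaExamplePooled* const a = pool.Alloc(1);
    VmaExamplePooled* const b = pool.Alloc(2);
    pool.Free(a); // a's slot returns to the head of the block's free list.
    VmaExamplePooled* const c = pool.Alloc(3); // Reuses a's slot (LIFO free list).
    pool.Free(b);
    pool.Free(c);
}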
5623 
5624 ////////////////////////////////////////////////////////////////////////////////
5625 // class VmaRawList, VmaList
5626 
5627 #if VMA_USE_STL_LIST
5628 
5629 #define VmaList std::list
5630 
5631 #else // #if VMA_USE_STL_LIST
5632 
5633 template<typename T>
5634 struct VmaListItem
5635 {
5636  VmaListItem* pPrev;
5637  VmaListItem* pNext;
5638  T Value;
5639 };
5640 
5641 // Doubly linked list.
5642 template<typename T>
5643 class VmaRawList
5644 {
5645  VMA_CLASS_NO_COPY(VmaRawList)
5646 public:
5647  typedef VmaListItem<T> ItemType;
5648 
5649  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5650  ~VmaRawList();
5651  void Clear();
5652 
5653  size_t GetCount() const { return m_Count; }
5654  bool IsEmpty() const { return m_Count == 0; }
5655 
5656  ItemType* Front() { return m_pFront; }
5657  const ItemType* Front() const { return m_pFront; }
5658  ItemType* Back() { return m_pBack; }
5659  const ItemType* Back() const { return m_pBack; }
5660 
5661  ItemType* PushBack();
5662  ItemType* PushFront();
5663  ItemType* PushBack(const T& value);
5664  ItemType* PushFront(const T& value);
5665  void PopBack();
5666  void PopFront();
5667 
5668  // Item can be null - it means PushBack.
5669  ItemType* InsertBefore(ItemType* pItem);
5670  // Item can be null - it means PushFront.
5671  ItemType* InsertAfter(ItemType* pItem);
5672 
5673  ItemType* InsertBefore(ItemType* pItem, const T& value);
5674  ItemType* InsertAfter(ItemType* pItem, const T& value);
5675 
5676  void Remove(ItemType* pItem);
5677 
5678 private:
5679  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5680  VmaPoolAllocator<ItemType> m_ItemAllocator;
5681  ItemType* m_pFront;
5682  ItemType* m_pBack;
5683  size_t m_Count;
5684 };
5685 
5686 template<typename T>
5687 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5688  m_pAllocationCallbacks(pAllocationCallbacks),
5689  m_ItemAllocator(pAllocationCallbacks, 128),
5690  m_pFront(VMA_NULL),
5691  m_pBack(VMA_NULL),
5692  m_Count(0)
5693 {
5694 }
5695 
5696 template<typename T>
5697 VmaRawList<T>::~VmaRawList()
5698 {
5699  // Intentionally not calling Clear, because that would spend time returning
5700  // all items to m_ItemAllocator as free, and m_ItemAllocator is about to be destroyed anyway.
5701 }
5702 
5703 template<typename T>
5704 void VmaRawList<T>::Clear()
5705 {
5706  if(IsEmpty() == false)
5707  {
5708  ItemType* pItem = m_pBack;
5709  while(pItem != VMA_NULL)
5710  {
5711  ItemType* const pPrevItem = pItem->pPrev;
5712  m_ItemAllocator.Free(pItem);
5713  pItem = pPrevItem;
5714  }
5715  m_pFront = VMA_NULL;
5716  m_pBack = VMA_NULL;
5717  m_Count = 0;
5718  }
5719 }
5720 
5721 template<typename T>
5722 VmaListItem<T>* VmaRawList<T>::PushBack()
5723 {
5724  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5725  pNewItem->pNext = VMA_NULL;
5726  if(IsEmpty())
5727  {
5728  pNewItem->pPrev = VMA_NULL;
5729  m_pFront = pNewItem;
5730  m_pBack = pNewItem;
5731  m_Count = 1;
5732  }
5733  else
5734  {
5735  pNewItem->pPrev = m_pBack;
5736  m_pBack->pNext = pNewItem;
5737  m_pBack = pNewItem;
5738  ++m_Count;
5739  }
5740  return pNewItem;
5741 }
5742 
5743 template<typename T>
5744 VmaListItem<T>* VmaRawList<T>::PushFront()
5745 {
5746  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5747  pNewItem->pPrev = VMA_NULL;
5748  if(IsEmpty())
5749  {
5750  pNewItem->pNext = VMA_NULL;
5751  m_pFront = pNewItem;
5752  m_pBack = pNewItem;
5753  m_Count = 1;
5754  }
5755  else
5756  {
5757  pNewItem->pNext = m_pFront;
5758  m_pFront->pPrev = pNewItem;
5759  m_pFront = pNewItem;
5760  ++m_Count;
5761  }
5762  return pNewItem;
5763 }
5764 
5765 template<typename T>
5766 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5767 {
5768  ItemType* const pNewItem = PushBack();
5769  pNewItem->Value = value;
5770  return pNewItem;
5771 }
5772 
5773 template<typename T>
5774 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5775 {
5776  ItemType* const pNewItem = PushFront();
5777  pNewItem->Value = value;
5778  return pNewItem;
5779 }
5780 
5781 template<typename T>
5782 void VmaRawList<T>::PopBack()
5783 {
5784  VMA_HEAVY_ASSERT(m_Count > 0);
5785  ItemType* const pBackItem = m_pBack;
5786  ItemType* const pPrevItem = pBackItem->pPrev;
5787  if(pPrevItem != VMA_NULL)
5788  {
5789  pPrevItem->pNext = VMA_NULL;
5790  }
5791  m_pBack = pPrevItem;
5792  m_ItemAllocator.Free(pBackItem);
5793  --m_Count;
5794 }
5795 
5796 template<typename T>
5797 void VmaRawList<T>::PopFront()
5798 {
5799  VMA_HEAVY_ASSERT(m_Count > 0);
5800  ItemType* const pFrontItem = m_pFront;
5801  ItemType* const pNextItem = pFrontItem->pNext;
5802  if(pNextItem != VMA_NULL)
5803  {
5804  pNextItem->pPrev = VMA_NULL;
5805  }
5806  m_pFront = pNextItem;
5807  m_ItemAllocator.Free(pFrontItem);
5808  --m_Count;
5809 }
5810 
5811 template<typename T>
5812 void VmaRawList<T>::Remove(ItemType* pItem)
5813 {
5814  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5815  VMA_HEAVY_ASSERT(m_Count > 0);
5816 
5817  if(pItem->pPrev != VMA_NULL)
5818  {
5819  pItem->pPrev->pNext = pItem->pNext;
5820  }
5821  else
5822  {
5823  VMA_HEAVY_ASSERT(m_pFront == pItem);
5824  m_pFront = pItem->pNext;
5825  }
5826 
5827  if(pItem->pNext != VMA_NULL)
5828  {
5829  pItem->pNext->pPrev = pItem->pPrev;
5830  }
5831  else
5832  {
5833  VMA_HEAVY_ASSERT(m_pBack == pItem);
5834  m_pBack = pItem->pPrev;
5835  }
5836 
5837  m_ItemAllocator.Free(pItem);
5838  --m_Count;
5839 }
5840 
5841 template<typename T>
5842 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5843 {
5844  if(pItem != VMA_NULL)
5845  {
5846  ItemType* const prevItem = pItem->pPrev;
5847  ItemType* const newItem = m_ItemAllocator.Alloc();
5848  newItem->pPrev = prevItem;
5849  newItem->pNext = pItem;
5850  pItem->pPrev = newItem;
5851  if(prevItem != VMA_NULL)
5852  {
5853  prevItem->pNext = newItem;
5854  }
5855  else
5856  {
5857  VMA_HEAVY_ASSERT(m_pFront == pItem);
5858  m_pFront = newItem;
5859  }
5860  ++m_Count;
5861  return newItem;
5862  }
5863  else
5864  return PushBack();
5865 }
5866 
5867 template<typename T>
5868 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5869 {
5870  if(pItem != VMA_NULL)
5871  {
5872  ItemType* const nextItem = pItem->pNext;
5873  ItemType* const newItem = m_ItemAllocator.Alloc();
5874  newItem->pNext = nextItem;
5875  newItem->pPrev = pItem;
5876  pItem->pNext = newItem;
5877  if(nextItem != VMA_NULL)
5878  {
5879  nextItem->pPrev = newItem;
5880  }
5881  else
5882  {
5883  VMA_HEAVY_ASSERT(m_pBack == pItem);
5884  m_pBack = newItem;
5885  }
5886  ++m_Count;
5887  return newItem;
5888  }
5889  else
5890  return PushFront();
5891 }
5892 
5893 template<typename T>
5894 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5895 {
5896  ItemType* const newItem = InsertBefore(pItem);
5897  newItem->Value = value;
5898  return newItem;
5899 }
5900 
5901 template<typename T>
5902 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5903 {
5904  ItemType* const newItem = InsertAfter(pItem);
5905  newItem->Value = value;
5906  return newItem;
5907 }
5908 
5909 template<typename T, typename AllocatorT>
5910 class VmaList
5911 {
5912  VMA_CLASS_NO_COPY(VmaList)
5913 public:
5914  class iterator
5915  {
5916  public:
5917  iterator() :
5918  m_pList(VMA_NULL),
5919  m_pItem(VMA_NULL)
5920  {
5921  }
5922 
5923  T& operator*() const
5924  {
5925  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5926  return m_pItem->Value;
5927  }
5928  T* operator->() const
5929  {
5930  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5931  return &m_pItem->Value;
5932  }
5933 
5934  iterator& operator++()
5935  {
5936  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5937  m_pItem = m_pItem->pNext;
5938  return *this;
5939  }
5940  iterator& operator--()
5941  {
5942  if(m_pItem != VMA_NULL)
5943  {
5944  m_pItem = m_pItem->pPrev;
5945  }
5946  else
5947  {
5948  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5949  m_pItem = m_pList->Back();
5950  }
5951  return *this;
5952  }
5953 
5954  iterator operator++(int)
5955  {
5956  iterator result = *this;
5957  ++*this;
5958  return result;
5959  }
5960  iterator operator--(int)
5961  {
5962  iterator result = *this;
5963  --*this;
5964  return result;
5965  }
5966 
5967  bool operator==(const iterator& rhs) const
5968  {
5969  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5970  return m_pItem == rhs.m_pItem;
5971  }
5972  bool operator!=(const iterator& rhs) const
5973  {
5974  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5975  return m_pItem != rhs.m_pItem;
5976  }
5977 
5978  private:
5979  VmaRawList<T>* m_pList;
5980  VmaListItem<T>* m_pItem;
5981 
5982  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5983  m_pList(pList),
5984  m_pItem(pItem)
5985  {
5986  }
5987 
5988  friend class VmaList<T, AllocatorT>;
5989  };
5990 
5991  class const_iterator
5992  {
5993  public:
5994  const_iterator() :
5995  m_pList(VMA_NULL),
5996  m_pItem(VMA_NULL)
5997  {
5998  }
5999 
6000  const_iterator(const iterator& src) :
6001  m_pList(src.m_pList),
6002  m_pItem(src.m_pItem)
6003  {
6004  }
6005 
6006  const T& operator*() const
6007  {
6008  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
6009  return m_pItem->Value;
6010  }
6011  const T* operator->() const
6012  {
6013  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
6014  return &m_pItem->Value;
6015  }
6016 
6017  const_iterator& operator++()
6018  {
6019  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
6020  m_pItem = m_pItem->pNext;
6021  return *this;
6022  }
6023  const_iterator& operator--()
6024  {
6025  if(m_pItem != VMA_NULL)
6026  {
6027  m_pItem = m_pItem->pPrev;
6028  }
6029  else
6030  {
6031  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
6032  m_pItem = m_pList->Back();
6033  }
6034  return *this;
6035  }
6036 
6037  const_iterator operator++(int)
6038  {
6039  const_iterator result = *this;
6040  ++*this;
6041  return result;
6042  }
6043  const_iterator operator--(int)
6044  {
6045  const_iterator result = *this;
6046  --*this;
6047  return result;
6048  }
6049 
6050  bool operator==(const const_iterator& rhs) const
6051  {
6052  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
6053  return m_pItem == rhs.m_pItem;
6054  }
6055  bool operator!=(const const_iterator& rhs) const
6056  {
6057  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
6058  return m_pItem != rhs.m_pItem;
6059  }
6060 
6061  private:
6062  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
6063  m_pList(pList),
6064  m_pItem(pItem)
6065  {
6066  }
6067 
6068  const VmaRawList<T>* m_pList;
6069  const VmaListItem<T>* m_pItem;
6070 
6071  friend class VmaList<T, AllocatorT>;
6072  };
6073 
6074  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
6075 
6076  bool empty() const { return m_RawList.IsEmpty(); }
6077  size_t size() const { return m_RawList.GetCount(); }
6078 
6079  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
6080  iterator end() { return iterator(&m_RawList, VMA_NULL); }
6081 
6082  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
6083  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
6084 
6085  const_iterator begin() const { return cbegin(); }
6086  const_iterator end() const { return cend(); }
6087 
6088  void clear() { m_RawList.Clear(); }
6089  void push_back(const T& value) { m_RawList.PushBack(value); }
6090  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
6091  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
6092 
6093 private:
6094  VmaRawList<T> m_RawList;
6095 };
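// Example (hypothetical function): VmaList wraps VmaRawList with STL-style
// iterators while the nodes come from the VmaPoolAllocator inside VmaRawList.
static void VmaExampleList(const VkAllocationCallbacks* pCallbacks)
{
    VmaList<int, VmaStlAllocator<int> > list{ VmaStlAllocator<int>(pCallbacks) };
    list.push_back(1);
    list.push_back(2);
    for(VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin(); it != list.end(); ++it)
    {
        if(*it == 1)
        {
            list.erase(it); // Invalidates it, so leave the loop.
            break;
        }
    }
}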
6096 
6097 #endif // #if VMA_USE_STL_LIST
6098 
6099 ////////////////////////////////////////////////////////////////////////////////
6100 // class VmaIntrusiveLinkedList
6101 
6102 /*
6103 Expected interface of ItemTypeTraits:
6104 struct MyItemTypeTraits
6105 {
6106  typedef MyItem ItemType;
6107  static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
6108  static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
6109  static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
6110  static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
6111 };
6112 */
6113 template<typename ItemTypeTraits>
6114 class VmaIntrusiveLinkedList
6115 {
6116 public:
6117  typedef typename ItemTypeTraits::ItemType ItemType;
6118  static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
6119  static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
6120  // Movable, not copyable.
6121  VmaIntrusiveLinkedList() { }
6122  VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
6123  VmaIntrusiveLinkedList(VmaIntrusiveLinkedList<ItemTypeTraits>&& src) :
6124  m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
6125  {
6126  src.m_Front = src.m_Back = VMA_NULL;
6127  src.m_Count = 0;
6128  }
6129  ~VmaIntrusiveLinkedList()
6130  {
6131  VMA_HEAVY_ASSERT(IsEmpty());
6132  }
6133  VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
6134  VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(VmaIntrusiveLinkedList<ItemTypeTraits>&& src)
6135  {
6136  if(&src != this)
6137  {
6138  VMA_HEAVY_ASSERT(IsEmpty());
6139  m_Front = src.m_Front;
6140  m_Back = src.m_Back;
6141  m_Count = src.m_Count;
6142  src.m_Front = src.m_Back = VMA_NULL;
6143  src.m_Count = 0;
6144  }
6145  return *this;
6146  }
6147  void RemoveAll()
6148  {
6149  if(!IsEmpty())
6150  {
6151  ItemType* item = m_Back;
6152  while(item != VMA_NULL)
6153  {
6154  ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
6155  ItemTypeTraits::AccessPrev(item) = VMA_NULL;
6156  ItemTypeTraits::AccessNext(item) = VMA_NULL;
6157  item = prevItem;
6158  }
6159  m_Front = VMA_NULL;
6160  m_Back = VMA_NULL;
6161  m_Count = 0;
6162  }
6163  }
6164  size_t GetCount() const { return m_Count; }
6165  bool IsEmpty() const { return m_Count == 0; }
6166  ItemType* Front() { return m_Front; }
6167  const ItemType* Front() const { return m_Front; }
6168  ItemType* Back() { return m_Back; }
6169  const ItemType* Back() const { return m_Back; }
6170  void PushBack(ItemType* item)
6171  {
6172  VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
6173  if(IsEmpty())
6174  {
6175  m_Front = item;
6176  m_Back = item;
6177  m_Count = 1;
6178  }
6179  else
6180  {
6181  ItemTypeTraits::AccessPrev(item) = m_Back;
6182  ItemTypeTraits::AccessNext(m_Back) = item;
6183  m_Back = item;
6184  ++m_Count;
6185  }
6186  }
6187  void PushFront(ItemType* item)
6188  {
6189  VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
6190  if(IsEmpty())
6191  {
6192  m_Front = item;
6193  m_Back = item;
6194  m_Count = 1;
6195  }
6196  else
6197  {
6198  ItemTypeTraits::AccessNext(item) = m_Front;
6199  ItemTypeTraits::AccessPrev(m_Front) = item;
6200  m_Front = item;
6201  ++m_Count;
6202  }
6203  }
6204  ItemType* PopBack()
6205  {
6206  VMA_HEAVY_ASSERT(m_Count > 0);
6207  ItemType* const backItem = m_Back;
6208  ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
6209  if(prevItem != VMA_NULL)
6210  {
6211  ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
6212  }
6213  m_Back = prevItem;
6214  --m_Count;
6215  ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
6216  ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
6217  return backItem;
6218  }
6219  ItemType* PopFront()
6220  {
6221  VMA_HEAVY_ASSERT(m_Count > 0);
6222  ItemType* const frontItem = m_Front;
6223  ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
6224  if(nextItem != VMA_NULL)
6225  {
6226  ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
6227  }
6228  m_Front = nextItem;
6229  --m_Count;
6230  ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
6231  ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
6232  return frontItem;
6233  }
6234 
6235  // existingItem can be null - it means PushBack.
6236  void InsertBefore(ItemType* existingItem, ItemType* newItem)
6237  {
6238  VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
6239  if(existingItem != VMA_NULL)
6240  {
6241  ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
6242  ItemTypeTraits::AccessPrev(newItem) = prevItem;
6243  ItemTypeTraits::AccessNext(newItem) = existingItem;
6244  ItemTypeTraits::AccessPrev(existingItem) = newItem;
6245  if(prevItem != VMA_NULL)
6246  {
6247  ItemTypeTraits::AccessNext(prevItem) = newItem;
6248  }
6249  else
6250  {
6251  VMA_HEAVY_ASSERT(m_Front == existingItem);
6252  m_Front = newItem;
6253  }
6254  ++m_Count;
6255  }
6256  else
6257  PushBack(newItem);
6258  }
6259  // existingItem can be null - it means PushFront.
6260  void InsertAfter(ItemType* existingItem, ItemType* newItem)
6261  {
6262  VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
6263  if(existingItem != VMA_NULL)
6264  {
6265  ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
6266  ItemTypeTraits::AccessNext(newItem) = nextItem;
6267  ItemTypeTraits::AccessPrev(newItem) = existingItem;
6268  ItemTypeTraits::AccessNext(existingItem) = newItem;
6269  if(nextItem != VMA_NULL)
6270  {
6271  ItemTypeTraits::AccessPrev(nextItem) = newItem;
6272  }
6273  else
6274  {
6275  VMA_HEAVY_ASSERT(m_Back == existingItem);
6276  m_Back = newItem;
6277  }
6278  ++m_Count;
6279  }
6280  else
6281  PushFront(newItem);
6282  }
6283  void Remove(ItemType* item)
6284  {
6285  VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
6286  if(ItemTypeTraits::GetPrev(item) != VMA_NULL)
6287  {
6288  ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
6289  }
6290  else
6291  {
6292  VMA_HEAVY_ASSERT(m_Front == item);
6293  m_Front = ItemTypeTraits::GetNext(item);
6294  }
6295 
6296  if(ItemTypeTraits::GetNext(item) != VMA_NULL)
6297  {
6298  ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
6299  }
6300  else
6301  {
6302  VMA_HEAVY_ASSERT(m_Back == item);
6303  m_Back = ItemTypeTraits::GetPrev(item);
6304  }
6305  ItemTypeTraits::AccessPrev(item) = VMA_NULL;
6306  ItemTypeTraits::AccessNext(item) = VMA_NULL;
6307  --m_Count;
6308  }
6309 private:
6310  ItemType* m_Front = VMA_NULL;
6311  ItemType* m_Back = VMA_NULL;
6312  size_t m_Count = 0;
6313 };
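// Example (hypothetical node type implementing the ItemTypeTraits interface
// documented above): the list threads through pointers embedded in the items
// themselves, so linking and unlinking never allocate.
struct VmaExampleNode
{
    int payload = 0;
    VmaExampleNode* prev = VMA_NULL;
    VmaExampleNode* next = VMA_NULL;
};

struct VmaExampleNodeTraits
{
    typedef VmaExampleNode ItemType;
    static ItemType* GetPrev(const ItemType* item) { return item->prev; }
    static ItemType* GetNext(const ItemType* item) { return item->next; }
    static ItemType*& AccessPrev(ItemType* item) { return item->prev; }
    static ItemType*& AccessNext(ItemType* item) { return item->next; }
};

static void VmaExampleIntrusiveList()
{
    VmaExampleNode n1, n2;
    VmaIntrusiveLinkedList<VmaExampleNodeTraits> list;
    list.PushBack(&n1);
    list.PushBack(&n2);
    list.Remove(&n1);
    list.RemoveAll(); // The destructor asserts that the list is empty.
}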
6314 
6315 ////////////////////////////////////////////////////////////////////////////////
6316 // class VmaMap
6317 
6318 // Unused in this version.
6319 #if 0
6320 
6321 #if VMA_USE_STL_UNORDERED_MAP
6322 
6323 #define VmaPair std::pair
6324 
6325 #define VMA_MAP_TYPE(KeyT, ValueT) \
6326  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
6327 
6328 #else // #if VMA_USE_STL_UNORDERED_MAP
6329 
6330 template<typename T1, typename T2>
6331 struct VmaPair
6332 {
6333  T1 first;
6334  T2 second;
6335 
6336  VmaPair() : first(), second() { }
6337  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
6338 };
6339 
6340 /* Class compatible with a subset of the interface of std::unordered_map.
6341 KeyT, ValueT must be POD because they will be stored in VmaVector.
6342 */
6343 template<typename KeyT, typename ValueT>
6344 class VmaMap
6345 {
6346 public:
6347  typedef VmaPair<KeyT, ValueT> PairType;
6348  typedef PairType* iterator;
6349 
6350  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
6351 
6352  iterator begin() { return m_Vector.begin(); }
6353  iterator end() { return m_Vector.end(); }
6354 
6355  void insert(const PairType& pair);
6356  iterator find(const KeyT& key);
6357  void erase(iterator it);
6358 
6359 private:
6360  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
6361 };
6362 
6363 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
6364 
6365 template<typename FirstT, typename SecondT>
6366 struct VmaPairFirstLess
6367 {
6368  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
6369  {
6370  return lhs.first < rhs.first;
6371  }
6372  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
6373  {
6374  return lhs.first < rhsFirst;
6375  }
6376 };
6377 
6378 template<typename KeyT, typename ValueT>
6379 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
6380 {
6381  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
6382  m_Vector.data(),
6383  m_Vector.data() + m_Vector.size(),
6384  pair,
6385  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
6386  VmaVectorInsert(m_Vector, indexToInsert, pair);
6387 }
6388 
6389 template<typename KeyT, typename ValueT>
6390 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
6391 {
6392  PairType* it = VmaBinaryFindFirstNotLess(
6393  m_Vector.data(),
6394  m_Vector.data() + m_Vector.size(),
6395  key,
6396  VmaPairFirstLess<KeyT, ValueT>());
6397  if((it != m_Vector.end()) && (it->first == key))
6398  {
6399  return it;
6400  }
6401  else
6402  {
6403  return m_Vector.end();
6404  }
6405 }
6406 
6407 template<typename KeyT, typename ValueT>
6408 void VmaMap<KeyT, ValueT>::erase(iterator it)
6409 {
6410  VmaVectorRemove(m_Vector, it - m_Vector.begin());
6411 }
6412 
6413 #endif // #if VMA_USE_STL_UNORDERED_MAP
6414 
6415 #endif // #if 0
6416 
6417 ////////////////////////////////////////////////////////////////////////////////
6418 
6419 class VmaDeviceMemoryBlock;
6420 
6421 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
6422 
6423 struct VmaAllocation_T
6424 {
6425 private:
6426  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
6427 
6428  enum FLAGS
6429  {
6430  FLAG_USER_DATA_STRING = 0x01,
6431  };
6432 
6433 public:
6434  enum ALLOCATION_TYPE
6435  {
6436  ALLOCATION_TYPE_NONE,
6437  ALLOCATION_TYPE_BLOCK,
6438  ALLOCATION_TYPE_DEDICATED,
6439  };
6440 
6441  /*
6442  This struct is allocated using VmaPoolAllocator.
6443  */
6444 
6445  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
6446  m_Alignment{1},
6447  m_Size{0},
6448  m_pUserData{VMA_NULL},
6449  m_LastUseFrameIndex{currentFrameIndex},
6450  m_MemoryTypeIndex{0},
6451  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
6452  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
6453  m_MapCount{0},
6454  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
6455  {
6456 #if VMA_STATS_STRING_ENABLED
6457  m_CreationFrameIndex = currentFrameIndex;
6458  m_BufferImageUsage = 0;
6459 #endif
6460  }
6461 
6462  ~VmaAllocation_T()
6463  {
6464  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
6465 
6466  // Check if owned string was freed.
6467  VMA_ASSERT(m_pUserData == VMA_NULL);
6468  }
6469 
6470  void InitBlockAllocation(
6471  VmaDeviceMemoryBlock* block,
6472  VkDeviceSize offset,
6473  VkDeviceSize alignment,
6474  VkDeviceSize size,
6475  uint32_t memoryTypeIndex,
6476  VmaSuballocationType suballocationType,
6477  bool mapped,
6478  bool canBecomeLost)
6479  {
6480  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6481  VMA_ASSERT(block != VMA_NULL);
6482  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6483  m_Alignment = alignment;
6484  m_Size = size;
6485  m_MemoryTypeIndex = memoryTypeIndex;
6486  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6487  m_SuballocationType = (uint8_t)suballocationType;
6488  m_BlockAllocation.m_Block = block;
6489  m_BlockAllocation.m_Offset = offset;
6490  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
6491  }
6492 
6493  void InitLost()
6494  {
6495  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6496  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
6497  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6498  m_MemoryTypeIndex = 0;
6499  m_BlockAllocation.m_Block = VMA_NULL;
6500  m_BlockAllocation.m_Offset = 0;
6501  m_BlockAllocation.m_CanBecomeLost = true;
6502  }
6503 
6504  void ChangeBlockAllocation(
6505  VmaAllocator hAllocator,
6506  VmaDeviceMemoryBlock* block,
6507  VkDeviceSize offset);
6508 
6509  void ChangeOffset(VkDeviceSize newOffset);
6510 
6511  // pMappedData not null means the allocation was created with the MAPPED flag.
6512  void InitDedicatedAllocation(
6513  uint32_t memoryTypeIndex,
6514  VkDeviceMemory hMemory,
6515  VmaSuballocationType suballocationType,
6516  void* pMappedData,
6517  VkDeviceSize size)
6518  {
6519  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6520  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
6521  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
6522  m_Alignment = 0;
6523  m_Size = size;
6524  m_MemoryTypeIndex = memoryTypeIndex;
6525  m_SuballocationType = (uint8_t)suballocationType;
6526  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6527  m_DedicatedAllocation.m_hMemory = hMemory;
6528  m_DedicatedAllocation.m_pMappedData = pMappedData;
6529  m_DedicatedAllocation.m_Prev = VMA_NULL;
6530  m_DedicatedAllocation.m_Next = VMA_NULL;
6531  }
6532 
6533  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
6534  VkDeviceSize GetAlignment() const { return m_Alignment; }
6535  VkDeviceSize GetSize() const { return m_Size; }
6536  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
6537  void* GetUserData() const { return m_pUserData; }
6538  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6539  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6540 
6541  VmaDeviceMemoryBlock* GetBlock() const
6542  {
6543  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6544  return m_BlockAllocation.m_Block;
6545  }
6546  VkDeviceSize GetOffset() const;
6547  VkDeviceMemory GetMemory() const;
6548  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6549  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6550  void* GetMappedData() const;
6551  bool CanBecomeLost() const;
6552 
6553  uint32_t GetLastUseFrameIndex() const
6554  {
6555  return m_LastUseFrameIndex.load();
6556  }
6557  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6558  {
6559  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6560  }
6561  /*
6562  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6563  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6564  - Else, returns false.
6565 
6566  If hAllocation is already lost, asserts - you should not call it then.
6567  If hAllocation was not created with CAN_BECOME_LOST_BIT, asserts.
6568  */
6569  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6570 
6571  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6572  {
6573  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6574  outInfo.blockCount = 1;
6575  outInfo.allocationCount = 1;
6576  outInfo.unusedRangeCount = 0;
6577  outInfo.usedBytes = m_Size;
6578  outInfo.unusedBytes = 0;
6579  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6580  outInfo.unusedRangeSizeMin = UINT64_MAX;
6581  outInfo.unusedRangeSizeMax = 0;
6582  }
6583 
6584  void BlockAllocMap();
6585  void BlockAllocUnmap();
6586  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6587  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6588 
6589 #if VMA_STATS_STRING_ENABLED
6590  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6591  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6592 
6593  void InitBufferImageUsage(uint32_t bufferImageUsage)
6594  {
6595  VMA_ASSERT(m_BufferImageUsage == 0);
6596  m_BufferImageUsage = bufferImageUsage;
6597  }
6598 
6599  void PrintParameters(class VmaJsonWriter& json) const;
6600 #endif
6601 
6602 private:
6603  VkDeviceSize m_Alignment;
6604  VkDeviceSize m_Size;
6605  void* m_pUserData;
6606  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6607  uint32_t m_MemoryTypeIndex;
6608  uint8_t m_Type; // ALLOCATION_TYPE
6609  uint8_t m_SuballocationType; // VmaSuballocationType
6610  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6611  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
6612  uint8_t m_MapCount;
6613  uint8_t m_Flags; // enum FLAGS
6614 
6615  // Allocation out of VmaDeviceMemoryBlock.
6616  struct BlockAllocation
6617  {
6618  VmaDeviceMemoryBlock* m_Block;
6619  VkDeviceSize m_Offset;
6620  bool m_CanBecomeLost;
6621  };
6622 
6623  // Allocation for an object that has its own private VkDeviceMemory.
6624  struct DedicatedAllocation
6625  {
6626  VkDeviceMemory m_hMemory;
6627  void* m_pMappedData; // Not null means memory is mapped.
6628  VmaAllocation_T* m_Prev;
6629  VmaAllocation_T* m_Next;
6630  };
6631 
6632  union
6633  {
6634  // Allocation out of VmaDeviceMemoryBlock.
6635  BlockAllocation m_BlockAllocation;
6636  // Allocation for an object that has its own private VkDeviceMemory.
6637  DedicatedAllocation m_DedicatedAllocation;
6638  };
6639 
6640 #if VMA_STATS_STRING_ENABLED
6641  uint32_t m_CreationFrameIndex;
6642  uint32_t m_BufferImageUsage; // 0 if unknown.
6643 #endif
6644 
6645  void FreeUserDataString(VmaAllocator hAllocator);
6646 
6647  friend struct VmaDedicatedAllocationListItemTraits;
6648 };
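// Worked example of the m_MapCount encoding (a hypothetical helper, not part of
// the library): bit 0x80 marks a persistently mapped allocation, and the low
// 7 bits count outstanding vmaMapMemory() calls.
static bool VmaExampleIsMapped(uint8_t mapCount)
{
    const bool persistentlyMapped = (mapCount & 0x80) != 0;
    const uint8_t userMapRefs = static_cast<uint8_t>(mapCount & 0x7F);
    return persistentlyMapped || userMapRefs > 0;
}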
6649 
6650 struct VmaDedicatedAllocationListItemTraits
6651 {
6652  typedef VmaAllocation_T ItemType;
6653  static ItemType* GetPrev(const ItemType* item)
6654  {
6655  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6656  return item->m_DedicatedAllocation.m_Prev;
6657  }
6658  static ItemType* GetNext(const ItemType* item)
6659  {
6660  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6661  return item->m_DedicatedAllocation.m_Next;
6662  }
6663  static ItemType*& AccessPrev(ItemType* item)
6664  {
6665  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6666  return item->m_DedicatedAllocation.m_Prev;
6667  }
6668  static ItemType*& AccessNext(ItemType* item) {
6669  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6670  return item->m_DedicatedAllocation.m_Next;
6671  }
6672 };
6673 
6674 /*
6675 Represents a region of a VmaDeviceMemoryBlock that is either free or assigned
6676 and returned as an allocated memory block.
6677 */
6678 struct VmaSuballocation
6679 {
6680  VkDeviceSize offset;
6681  VkDeviceSize size;
6682  VmaAllocation hAllocation;
6683  VmaSuballocationType type;
6684 };
6685 
6686 // Comparator for offsets.
6687 struct VmaSuballocationOffsetLess
6688 {
6689  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6690  {
6691  return lhs.offset < rhs.offset;
6692  }
6693 };
6694 struct VmaSuballocationOffsetGreater
6695 {
6696  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6697  {
6698  return lhs.offset > rhs.offset;
6699  }
6700 };
6701 
6702 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6703 
6704 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
6705 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6706 
6707 enum class VmaAllocationRequestType
6708 {
6709  Normal,
6710  // Used by "Linear" algorithm.
6711  UpperAddress,
6712  EndOf1st,
6713  EndOf2nd,
6714 };
6715 
6716 /*
6717 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6718 
6719 If canMakeOtherLost was false:
6720 - item points to a FREE suballocation.
6721 - itemsToMakeLostCount is 0.
6722 
6723 If canMakeOtherLost was true:
6724 - item points to first of sequence of suballocations, which are either FREE,
6725  or point to VmaAllocations that can become lost.
6726 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6727  the requested allocation to succeed.
6728 */
6729 struct VmaAllocationRequest
6730 {
6731  VkDeviceSize offset;
6732  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6733  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6734  VmaSuballocationList::iterator item;
6735  size_t itemsToMakeLostCount;
6736  void* customData;
6737  VmaAllocationRequestType type;
6738 
6739  VkDeviceSize CalcCost() const
6740  {
6741  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6742  }
6743 };
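// Worked example of CalcCost(): a request that would make lost two allocations
// of 131072 bytes each has cost 2*131072 + 2*1048576 = 2359296 byte-equivalents,
// while a request satisfied entirely by a FREE suballocation has
// sumItemSize == 0 and itemsToMakeLostCount == 0, so its cost is 0 and it is
// always preferred when candidate placements are compared.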
6744 
6745 /*
6746 Data structure used for bookkeeping of allocations and unused ranges of memory
6747 in a single VkDeviceMemory block.
6748 */
6749 class VmaBlockMetadata
6750 {
6751 public:
6752  VmaBlockMetadata(VmaAllocator hAllocator);
6753  virtual ~VmaBlockMetadata() { }
6754  virtual void Init(VkDeviceSize size) { m_Size = size; }
6755 
6756  // Validates all data structures inside this object. If not valid, returns false.
6757  virtual bool Validate() const = 0;
6758  VkDeviceSize GetSize() const { return m_Size; }
6759  virtual size_t GetAllocationCount() const = 0;
6760  virtual VkDeviceSize GetSumFreeSize() const = 0;
6761  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6762  // Returns true if this block is empty - contains only a single free suballocation.
6763  virtual bool IsEmpty() const = 0;
6764 
6765  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6766  // Shouldn't modify blockCount.
6767  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6768 
6769 #if VMA_STATS_STRING_ENABLED
6770  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6771 #endif
6772 
6773  // Tries to find a place for a suballocation with the given parameters inside this block.
6774  // If it succeeds, fills pAllocationRequest and returns true.
6775  // If it fails, returns false.
6776  virtual bool CreateAllocationRequest(
6777  uint32_t currentFrameIndex,
6778  uint32_t frameInUseCount,
6779  VkDeviceSize bufferImageGranularity,
6780  VkDeviceSize allocSize,
6781  VkDeviceSize allocAlignment,
6782  bool upperAddress,
6783  VmaSuballocationType allocType,
6784  bool canMakeOtherLost,
6785  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6786  uint32_t strategy,
6787  VmaAllocationRequest* pAllocationRequest) = 0;
6788 
6789  virtual bool MakeRequestedAllocationsLost(
6790  uint32_t currentFrameIndex,
6791  uint32_t frameInUseCount,
6792  VmaAllocationRequest* pAllocationRequest) = 0;
6793 
6794  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6795 
6796  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6797 
6798  // Makes actual allocation based on request. Request must already be checked and valid.
6799  virtual void Alloc(
6800  const VmaAllocationRequest& request,
6801  VmaSuballocationType type,
6802  VkDeviceSize allocSize,
6803  VmaAllocation hAllocation) = 0;
6804 
6805  // Frees suballocation assigned to given memory region.
6806  virtual void Free(const VmaAllocation allocation) = 0;
6807  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6808 
6809 protected:
6810  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6811 
6812 #if VMA_STATS_STRING_ENABLED
6813  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6814  VkDeviceSize unusedBytes,
6815  size_t allocationCount,
6816  size_t unusedRangeCount) const;
6817  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6818  VkDeviceSize offset,
6819  VmaAllocation hAllocation) const;
6820  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6821  VkDeviceSize offset,
6822  VkDeviceSize size) const;
6823  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6824 #endif
6825 
6826 private:
6827  VkDeviceSize m_Size;
6828  const VkAllocationCallbacks* m_pAllocationCallbacks;
6829 };
6830 
6831 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6832  VMA_ASSERT(0 && "Validation failed: " #cond); \
6833  return false; \
6834  } } while(false)
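// Example (hypothetical function) of how the Validate() implementations below
// use the macro: on failure it asserts with the stringified condition and makes
// the enclosing function return false.
static bool VmaExampleValidateCounts(size_t freeCount, size_t suballocCount)
{
    VMA_VALIDATE(freeCount <= suballocCount);
    return true;
}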
6835 
6836 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6837 {
6838  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6839 public:
6840  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6841  virtual ~VmaBlockMetadata_Generic();
6842  virtual void Init(VkDeviceSize size);
6843 
6844  virtual bool Validate() const;
6845  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6846  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6847  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6848  virtual bool IsEmpty() const;
6849 
6850  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6851  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6852 
6853 #if VMA_STATS_STRING_ENABLED
6854  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6855 #endif
6856 
6857  virtual bool CreateAllocationRequest(
6858  uint32_t currentFrameIndex,
6859  uint32_t frameInUseCount,
6860  VkDeviceSize bufferImageGranularity,
6861  VkDeviceSize allocSize,
6862  VkDeviceSize allocAlignment,
6863  bool upperAddress,
6864  VmaSuballocationType allocType,
6865  bool canMakeOtherLost,
6866  uint32_t strategy,
6867  VmaAllocationRequest* pAllocationRequest);
6868 
6869  virtual bool MakeRequestedAllocationsLost(
6870  uint32_t currentFrameIndex,
6871  uint32_t frameInUseCount,
6872  VmaAllocationRequest* pAllocationRequest);
6873 
6874  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6875 
6876  virtual VkResult CheckCorruption(const void* pBlockData);
6877 
6878  virtual void Alloc(
6879  const VmaAllocationRequest& request,
6880  VmaSuballocationType type,
6881  VkDeviceSize allocSize,
6882  VmaAllocation hAllocation);
6883 
6884  virtual void Free(const VmaAllocation allocation);
6885  virtual void FreeAtOffset(VkDeviceSize offset);
6886 
6887  ////////////////////////////////////////////////////////////////////////////////
6888  // For defragmentation
6889 
6890  bool IsBufferImageGranularityConflictPossible(
6891  VkDeviceSize bufferImageGranularity,
6892  VmaSuballocationType& inOutPrevSuballocType) const;
6893 
6894 private:
6895  friend class VmaDefragmentationAlgorithm_Generic;
6896  friend class VmaDefragmentationAlgorithm_Fast;
6897 
6898  uint32_t m_FreeCount;
6899  VkDeviceSize m_SumFreeSize;
6900  VmaSuballocationList m_Suballocations;
6901  // Suballocations that are free and have size greater than certain threshold.
6902  // Sorted by size, ascending.
6903  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6904 
6905  bool ValidateFreeSuballocationList() const;
6906 
6907  // Checks if a suballocation with the given parameters can be placed at the given suballocItem.
6908  // If yes, fills pOffset and returns true. If no, returns false.
6909  bool CheckAllocation(
6910  uint32_t currentFrameIndex,
6911  uint32_t frameInUseCount,
6912  VkDeviceSize bufferImageGranularity,
6913  VkDeviceSize allocSize,
6914  VkDeviceSize allocAlignment,
6915  VmaSuballocationType allocType,
6916  VmaSuballocationList::const_iterator suballocItem,
6917  bool canMakeOtherLost,
6918  VkDeviceSize* pOffset,
6919  size_t* itemsToMakeLostCount,
6920  VkDeviceSize* pSumFreeSize,
6921  VkDeviceSize* pSumItemSize) const;
6922  // Given a free suballocation, merges it with the following one, which must also be free.
6923  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6924  // Releases the given suballocation, making it free.
6925  // Merges it with adjacent free suballocations if applicable.
6926  // Returns an iterator to the new free suballocation at this place.
6927  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6928  // Given a free suballocation, inserts it into the sorted list
6929  // m_FreeSuballocationsBySize if it is large enough.
6930  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6931  // Given a free suballocation, removes it from the sorted list
6932  // m_FreeSuballocationsBySize if it was registered there.
6933  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6934 };
6935 
6936 /*
6937 Allocations and their references in internal data structure look like this:
6938 
6939 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6940 
6941  0 +-------+
6942  | |
6943  | |
6944  | |
6945  +-------+
6946  | Alloc | 1st[m_1stNullItemsBeginCount]
6947  +-------+
6948  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6949  +-------+
6950  | ... |
6951  +-------+
6952  | Alloc | 1st[1st.size() - 1]
6953  +-------+
6954  | |
6955  | |
6956  | |
6957 GetSize() +-------+
6958 
6959 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6960 
6961  0 +-------+
6962  | Alloc | 2nd[0]
6963  +-------+
6964  | Alloc | 2nd[1]
6965  +-------+
6966  | ... |
6967  +-------+
6968  | Alloc | 2nd[2nd.size() - 1]
6969  +-------+
6970  | |
6971  | |
6972  | |
6973  +-------+
6974  | Alloc | 1st[m_1stNullItemsBeginCount]
6975  +-------+
6976  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6977  +-------+
6978  | ... |
6979  +-------+
6980  | Alloc | 1st[1st.size() - 1]
6981  +-------+
6982  | |
6983 GetSize() +-------+
6984 
6985 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6986 
6987  0 +-------+
6988  | |
6989  | |
6990  | |
6991  +-------+
6992  | Alloc | 1st[m_1stNullItemsBeginCount]
6993  +-------+
6994  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6995  +-------+
6996  | ... |
6997  +-------+
6998  | Alloc | 1st[1st.size() - 1]
6999  +-------+
7000  | |
7001  | |
7002  | |
7003  +-------+
7004  | Alloc | 2nd[2nd.size() - 1]
7005  +-------+
7006  | ... |
7007  +-------+
7008  | Alloc | 2nd[1]
7009  +-------+
7010  | Alloc | 2nd[0]
7011 GetSize() +-------+
7012 
7013 */
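/*
For context, a sketch of how these modes are reached through the public API
(assumes an already created `allocator` and a `memTypeIndex` found e.g. with
vmaFindMemoryTypeIndex(); not a complete program):

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT; // Selects VmaBlockMetadata_Linear.
poolCreateInfo.blockSize = 64ull * 1024 * 1024;
poolCreateInfo.maxBlockCount = 1;

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

// Freeing from the beginning of 1st while allocating at the end produces
// SECOND_VECTOR_RING_BUFFER; allocating with
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT produces SECOND_VECTOR_DOUBLE_STACK.
\endcode
*/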
7014 class VmaBlockMetadata_Linear : public VmaBlockMetadata
7015 {
7016  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
7017 public:
7018  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
7019  virtual ~VmaBlockMetadata_Linear();
7020  virtual void Init(VkDeviceSize size);
7021 
7022  virtual bool Validate() const;
7023  virtual size_t GetAllocationCount() const;
7024  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
7025  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
7026  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
7027 
7028  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
7029  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
7030 
7031 #if VMA_STATS_STRING_ENABLED
7032  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
7033 #endif
7034 
7035  virtual bool CreateAllocationRequest(
7036  uint32_t currentFrameIndex,
7037  uint32_t frameInUseCount,
7038  VkDeviceSize bufferImageGranularity,
7039  VkDeviceSize allocSize,
7040  VkDeviceSize allocAlignment,
7041  bool upperAddress,
7042  VmaSuballocationType allocType,
7043  bool canMakeOtherLost,
7044  uint32_t strategy,
7045  VmaAllocationRequest* pAllocationRequest);
7046 
7047  virtual bool MakeRequestedAllocationsLost(
7048  uint32_t currentFrameIndex,
7049  uint32_t frameInUseCount,
7050  VmaAllocationRequest* pAllocationRequest);
7051 
7052  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
7053 
7054  virtual VkResult CheckCorruption(const void* pBlockData);
7055 
7056  virtual void Alloc(
7057  const VmaAllocationRequest& request,
7058  VmaSuballocationType type,
7059  VkDeviceSize allocSize,
7060  VmaAllocation hAllocation);
7061 
7062  virtual void Free(const VmaAllocation allocation);
7063  virtual void FreeAtOffset(VkDeviceSize offset);
7064 
7065 private:
7066  /*
7067  There are two suballocation vectors, used in a ping-pong fashion.
7068  The one with index m_1stVectorIndex is called 1st.
7069  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
7070  2nd can be non-empty only when 1st is not empty.
7071  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
7072  */
7073  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
7074 
7075  enum SECOND_VECTOR_MODE
7076  {
7077  SECOND_VECTOR_EMPTY,
7078  /*
7079  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
7080  all have smaller offsets.
7081  */
7082  SECOND_VECTOR_RING_BUFFER,
7083  /*
7084  Suballocations in the 2nd vector form the upper side of a double stack.
7085  They all have offsets higher than those in the 1st vector.
7086  The top of this stack means smaller offsets, but higher indices in this vector.
7087  */
7088  SECOND_VECTOR_DOUBLE_STACK,
7089  };
7090 
7091  VkDeviceSize m_SumFreeSize;
7092  SuballocationVectorType m_Suballocations0, m_Suballocations1;
7093  uint32_t m_1stVectorIndex;
7094  SECOND_VECTOR_MODE m_2ndVectorMode;
7095 
7096  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7097  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7098  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7099  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7100 
7101  // Number of items in 1st vector with hAllocation = null at the beginning.
7102  size_t m_1stNullItemsBeginCount;
7103  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
7104  size_t m_1stNullItemsMiddleCount;
7105  // Number of items in 2nd vector with hAllocation = null.
7106  size_t m_2ndNullItemsCount;
7107 
7108  bool ShouldCompact1st() const;
7109  void CleanupAfterFree();
7110 
7111  bool CreateAllocationRequest_LowerAddress(
7112  uint32_t currentFrameIndex,
7113  uint32_t frameInUseCount,
7114  VkDeviceSize bufferImageGranularity,
7115  VkDeviceSize allocSize,
7116  VkDeviceSize allocAlignment,
7117  VmaSuballocationType allocType,
7118  bool canMakeOtherLost,
7119  uint32_t strategy,
7120  VmaAllocationRequest* pAllocationRequest);
7121  bool CreateAllocationRequest_UpperAddress(
7122  uint32_t currentFrameIndex,
7123  uint32_t frameInUseCount,
7124  VkDeviceSize bufferImageGranularity,
7125  VkDeviceSize allocSize,
7126  VkDeviceSize allocAlignment,
7127  VmaSuballocationType allocType,
7128  bool canMakeOtherLost,
7129  uint32_t strategy,
7130  VmaAllocationRequest* pAllocationRequest);
7131 };
7132 
7133 /*
7134 - GetSize() is the original size of the allocated memory block.
7135 - m_UsableSize is this size aligned down to a power of two.
7136  All allocations and calculations happen relative to m_UsableSize.
7137 - GetUnusableSize() is the difference between them.
7138  It is reported as a separate, unused range, not available for allocations.
7139 
7140 Node at level 0 has size = m_UsableSize.
7141 Each subsequent level contains nodes half the size of those on the previous level.
7142 m_LevelCount is the maximum number of levels to use in the current object.
7143 */
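/*
A worked example of these sizes (numbers are illustrative): if GetSize() == 10 MB,
then m_UsableSize == 8 MB and GetUnusableSize() == 2 MB. The level of an allocation
can then be computed as sketched below (this mirrors the idea of AllocSizeToLevel()
declared further down; variable names are local to the example):

\code
uint32_t level = 0;
VkDeviceSize nodeSize = usableSize;              // 8 MB at level 0.
while(nodeSize / 2 >= allocSize && level + 1 < levelCount)
{
    nodeSize /= 2;                               // Each level halves the node size.
    ++level;                                     // A 1 MB request ends at level 3.
}
\endcode
*/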
7144 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
7145 {
7146  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
7147 public:
7148  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
7149  virtual ~VmaBlockMetadata_Buddy();
7150  virtual void Init(VkDeviceSize size);
7151 
7152  virtual bool Validate() const;
7153  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
7154  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
7155  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
7156  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
7157 
7158  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
7159  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
7160 
7161 #if VMA_STATS_STRING_ENABLED
7162  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
7163 #endif
7164 
7165  virtual bool CreateAllocationRequest(
7166  uint32_t currentFrameIndex,
7167  uint32_t frameInUseCount,
7168  VkDeviceSize bufferImageGranularity,
7169  VkDeviceSize allocSize,
7170  VkDeviceSize allocAlignment,
7171  bool upperAddress,
7172  VmaSuballocationType allocType,
7173  bool canMakeOtherLost,
7174  uint32_t strategy,
7175  VmaAllocationRequest* pAllocationRequest);
7176 
7177  virtual bool MakeRequestedAllocationsLost(
7178  uint32_t currentFrameIndex,
7179  uint32_t frameInUseCount,
7180  VmaAllocationRequest* pAllocationRequest);
7181 
7182  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
7183 
7184  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
7185 
7186  virtual void Alloc(
7187  const VmaAllocationRequest& request,
7188  VmaSuballocationType type,
7189  VkDeviceSize allocSize,
7190  VmaAllocation hAllocation);
7191 
7192  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
7193  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
7194 
7195 private:
7196  static const VkDeviceSize MIN_NODE_SIZE = 32;
7197  static const size_t MAX_LEVELS = 30;
7198 
7199  struct ValidationContext
7200  {
7201  size_t calculatedAllocationCount;
7202  size_t calculatedFreeCount;
7203  VkDeviceSize calculatedSumFreeSize;
7204 
7205  ValidationContext() :
7206  calculatedAllocationCount(0),
7207  calculatedFreeCount(0),
7208  calculatedSumFreeSize(0) { }
7209  };
7210 
7211  struct Node
7212  {
7213  VkDeviceSize offset;
7214  enum TYPE
7215  {
7216  TYPE_FREE,
7217  TYPE_ALLOCATION,
7218  TYPE_SPLIT,
7219  TYPE_COUNT
7220  } type;
7221  Node* parent;
7222  Node* buddy;
7223 
7224  union
7225  {
7226  struct
7227  {
7228  Node* prev;
7229  Node* next;
7230  } free;
7231  struct
7232  {
7233  VmaAllocation alloc;
7234  } allocation;
7235  struct
7236  {
7237  Node* leftChild;
7238  } split;
7239  };
7240  };
7241 
7242  // Size of the memory block aligned down to a power of two.
7243  VkDeviceSize m_UsableSize;
7244  uint32_t m_LevelCount;
7245 
7246  Node* m_Root;
7247  struct {
7248  Node* front;
7249  Node* back;
7250  } m_FreeList[MAX_LEVELS];
7251  // Number of nodes in the tree with type == TYPE_ALLOCATION.
7252  size_t m_AllocationCount;
7253  // Number of nodes in the tree with type == TYPE_FREE.
7254  size_t m_FreeCount;
7255  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
7256  VkDeviceSize m_SumFreeSize;
7257 
7258  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
7259  void DeleteNode(Node* node);
7260  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
7261  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
7262  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
7263  // Alloc passed just for validation. Can be null.
7264  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
7265  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
7266  // Adds node to the front of FreeList at given level.
7267  // node->type must be FREE.
7268  // node->free.prev, next can be undefined.
7269  void AddToFreeListFront(uint32_t level, Node* node);
7270  // Removes node from FreeList at given level.
7271  // node->type must be FREE.
7272  // node->free.prev, next stay untouched.
7273  void RemoveFromFreeList(uint32_t level, Node* node);
7274 
7275 #if VMA_STATS_STRING_ENABLED
7276  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
7277 #endif
7278 };
7279 
7280 /*
7281 Represents a single block of device memory (`VkDeviceMemory`) with all the
7282 data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
7283 
7284 Thread-safety: This class must be externally synchronized.
7285 */
7286 class VmaDeviceMemoryBlock
7287 {
7288  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
7289 public:
7290  VmaBlockMetadata* m_pMetadata;
7291 
7292  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
7293 
7294  ~VmaDeviceMemoryBlock()
7295  {
7296  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
7297  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
7298  }
7299 
7300  // Always call after construction.
7301  void Init(
7302  VmaAllocator hAllocator,
7303  VmaPool hParentPool,
7304  uint32_t newMemoryTypeIndex,
7305  VkDeviceMemory newMemory,
7306  VkDeviceSize newSize,
7307  uint32_t id,
7308  uint32_t algorithm);
7309  // Always call before destruction.
7310  void Destroy(VmaAllocator allocator);
7311 
7312  VmaPool GetParentPool() const { return m_hParentPool; }
7313  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
7314  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7315  uint32_t GetId() const { return m_Id; }
7316  void* GetMappedData() const { return m_pMappedData; }
7317 
7318  // Validates all data structures inside this object. If not valid, returns false.
7319  bool Validate() const;
7320 
7321  VkResult CheckCorruption(VmaAllocator hAllocator);
7322 
7323  // ppData can be null.
7324  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
7325  void Unmap(VmaAllocator hAllocator, uint32_t count);
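/*
Mapping is reference-counted per block via m_MapCount. A usage sketch (hAllocator
and block are assumed to exist; count lets one call stand for several references):

\code
void* pData = VMA_NULL;
if(block.Map(hAllocator, 1, &pData) == VK_SUCCESS) // vkMapMemory only on 0 -> 1.
{
    // ... read/write through pData ...
    block.Unmap(hAllocator, 1);                    // vkUnmapMemory only on 1 -> 0.
}
\endcode
*/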
7326 
7327  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
7328  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
7329 
7330  VkResult BindBufferMemory(
7331  const VmaAllocator hAllocator,
7332  const VmaAllocation hAllocation,
7333  VkDeviceSize allocationLocalOffset,
7334  VkBuffer hBuffer,
7335  const void* pNext);
7336  VkResult BindImageMemory(
7337  const VmaAllocator hAllocator,
7338  const VmaAllocation hAllocation,
7339  VkDeviceSize allocationLocalOffset,
7340  VkImage hImage,
7341  const void* pNext);
7342 
7343 private:
7344  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
7345  uint32_t m_MemoryTypeIndex;
7346  uint32_t m_Id;
7347  VkDeviceMemory m_hMemory;
7348 
7349  /*
7350  Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. by vkMapMemory or vkBindBufferMemory.
7351  Also protects m_MapCount, m_pMappedData.
7352  Allocations, deallocations, and any change to m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
7353  */
7354  VMA_MUTEX m_Mutex;
7355  uint32_t m_MapCount;
7356  void* m_pMappedData;
7357 };
7358 
7359 struct VmaDefragmentationMove
7360 {
7361  size_t srcBlockIndex;
7362  size_t dstBlockIndex;
7363  VkDeviceSize srcOffset;
7364  VkDeviceSize dstOffset;
7365  VkDeviceSize size;
7366  VmaAllocation hAllocation;
7367  VmaDeviceMemoryBlock* pSrcBlock;
7368  VmaDeviceMemoryBlock* pDstBlock;
7369 };
7370 
7371 class VmaDefragmentationAlgorithm;
7372 
7373 /*
7374 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
7375 Vulkan memory type.
7376 
7377 Synchronized internally with a mutex.
7378 */
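/*
A block vector backs either one of the default pools (one per memory type, see
VmaAllocator_T::m_pBlockVectors below) or one custom pool. For example, the public
vmaGetPoolStats() ends up in GetPoolStats() declared here (sketch; assumes an
existing `allocator` and `pool`):

\code
VmaPoolStats stats = {};
vmaGetPoolStats(allocator, pool, &stats);
// stats.size, stats.unusedSize etc. are aggregated from every block's metadata.
\endcode
*/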
7379 struct VmaBlockVector
7380 {
7381  VMA_CLASS_NO_COPY(VmaBlockVector)
7382 public:
7383  VmaBlockVector(
7384  VmaAllocator hAllocator,
7385  VmaPool hParentPool,
7386  uint32_t memoryTypeIndex,
7387  VkDeviceSize preferredBlockSize,
7388  size_t minBlockCount,
7389  size_t maxBlockCount,
7390  VkDeviceSize bufferImageGranularity,
7391  uint32_t frameInUseCount,
7392  bool explicitBlockSize,
7393  uint32_t algorithm,
7394  float priority,
7395  VkDeviceSize minAllocationAlignment,
7396  void* pMemoryAllocateNext);
7397  ~VmaBlockVector();
7398 
7399  VkResult CreateMinBlocks();
7400 
7401  VmaAllocator GetAllocator() const { return m_hAllocator; }
7402  VmaPool GetParentPool() const { return m_hParentPool; }
7403  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
7404  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7405  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
7406  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
7407  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
7408  uint32_t GetAlgorithm() const { return m_Algorithm; }
7409 
7410  void GetPoolStats(VmaPoolStats* pStats);
7411 
7412  bool IsEmpty();
7413  bool IsCorruptionDetectionEnabled() const;
7414 
7415  VkResult Allocate(
7416  uint32_t currentFrameIndex,
7417  VkDeviceSize size,
7418  VkDeviceSize alignment,
7419  const VmaAllocationCreateInfo& createInfo,
7420  VmaSuballocationType suballocType,
7421  size_t allocationCount,
7422  VmaAllocation* pAllocations);
7423 
7424  void Free(const VmaAllocation hAllocation);
7425 
7426  // Adds statistics of this BlockVector to pStats.
7427  void AddStats(VmaStats* pStats);
7428 
7429 #if VMA_STATS_STRING_ENABLED
7430  void PrintDetailedMap(class VmaJsonWriter& json);
7431 #endif
7432 
7433  void MakePoolAllocationsLost(
7434  uint32_t currentFrameIndex,
7435  size_t* pLostAllocationCount);
7436  VkResult CheckCorruption();
7437 
7438  // Saves results in pCtx->res.
7439  void Defragment(
7440  class VmaBlockVectorDefragmentationContext* pCtx,
7441  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
7442  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
7443  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
7444  VkCommandBuffer commandBuffer);
7445  void DefragmentationEnd(
7446  class VmaBlockVectorDefragmentationContext* pCtx,
7447  uint32_t flags,
7448  VmaDefragmentationStats* pStats);
7449 
7450  uint32_t ProcessDefragmentations(
7451  class VmaBlockVectorDefragmentationContext *pCtx,
7452  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
7453 
7454  void CommitDefragmentations(
7455  class VmaBlockVectorDefragmentationContext *pCtx,
7456  VmaDefragmentationStats* pStats);
7457 
7458  ////////////////////////////////////////////////////////////////////////////////
7459  // To be used only while the m_Mutex is locked. Used during defragmentation.
7460 
7461  size_t GetBlockCount() const { return m_Blocks.size(); }
7462  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
7463  size_t CalcAllocationCount() const;
7464  bool IsBufferImageGranularityConflictPossible() const;
7465 
7466 private:
7467  friend class VmaDefragmentationAlgorithm_Generic;
7468 
7469  const VmaAllocator m_hAllocator;
7470  const VmaPool m_hParentPool;
7471  const uint32_t m_MemoryTypeIndex;
7472  const VkDeviceSize m_PreferredBlockSize;
7473  const size_t m_MinBlockCount;
7474  const size_t m_MaxBlockCount;
7475  const VkDeviceSize m_BufferImageGranularity;
7476  const uint32_t m_FrameInUseCount;
7477  const bool m_ExplicitBlockSize;
7478  const uint32_t m_Algorithm;
7479  const float m_Priority;
7480  const VkDeviceSize m_MinAllocationAlignment;
7481  void* const m_pMemoryAllocateNext;
7482  VMA_RW_MUTEX m_Mutex;
7483 
7484  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
7485  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
7486  bool m_HasEmptyBlock;
7487  // Incrementally sorted by sumFreeSize, ascending.
7488  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
7489  uint32_t m_NextBlockId;
7490 
7491  VkDeviceSize CalcMaxBlockSize() const;
7492 
7493  // Finds and removes given block from vector.
7494  void Remove(VmaDeviceMemoryBlock* pBlock);
7495 
7496  // Performs a single step in sorting m_Blocks. They may not be fully sorted
7497  // after this call.
7498  void IncrementallySortBlocks();
7499 
7500  VkResult AllocatePage(
7501  uint32_t currentFrameIndex,
7502  VkDeviceSize size,
7503  VkDeviceSize alignment,
7504  const VmaAllocationCreateInfo& createInfo,
7505  VmaSuballocationType suballocType,
7506  VmaAllocation* pAllocation);
7507 
7508  // To be used only without CAN_MAKE_OTHER_LOST flag.
7509  VkResult AllocateFromBlock(
7510  VmaDeviceMemoryBlock* pBlock,
7511  uint32_t currentFrameIndex,
7512  VkDeviceSize size,
7513  VkDeviceSize alignment,
7514  VmaAllocationCreateFlags allocFlags,
7515  void* pUserData,
7516  VmaSuballocationType suballocType,
7517  uint32_t strategy,
7518  VmaAllocation* pAllocation);
7519 
7520  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
7521 
7522  // Saves result to pCtx->res.
7523  void ApplyDefragmentationMovesCpu(
7524  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7525  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
7526  // Saves result to pCtx->res.
7527  void ApplyDefragmentationMovesGpu(
7528  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7529  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7530  VkCommandBuffer commandBuffer);
7531 
7532  /*
7533  Used during defragmentation. pDefragmentationStats is optional. It's in/out
7534  - updated with new data.
7535  */
7536  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7537 
7538  void UpdateHasEmptyBlock();
7539 };
7540 
7541 struct VmaPool_T
7542 {
7543  VMA_CLASS_NO_COPY(VmaPool_T)
7544 public:
7545  VmaBlockVector m_BlockVector;
7546 
7547  VmaPool_T(
7548  VmaAllocator hAllocator,
7549  const VmaPoolCreateInfo& createInfo,
7550  VkDeviceSize preferredBlockSize);
7551  ~VmaPool_T();
7552 
7553  uint32_t GetId() const { return m_Id; }
7554  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
7555 
7556  const char* GetName() const { return m_Name; }
7557  void SetName(const char* pName);
7558 
7559 #if VMA_STATS_STRING_ENABLED
7560  //void PrintDetailedMap(class VmaStringBuilder& sb);
7561 #endif
7562 
7563 private:
7564  uint32_t m_Id;
7565  char* m_Name;
7566  VmaPool_T* m_PrevPool = VMA_NULL;
7567  VmaPool_T* m_NextPool = VMA_NULL;
7568  friend struct VmaPoolListItemTraits;
7569 };
7570 
7571 struct VmaPoolListItemTraits
7572 {
7573  typedef VmaPool_T ItemType;
7574  static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
7575  static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
7576  static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
7577  static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
7578 };
7579 
7580 /*
7581 Performs defragmentation:
7582 
7583 - Updates `m_pMetadata` of the blocks in `pBlockVector`.
7584 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7585 - Does not move actual data, only returns requested moves as `moves`.
7586 */
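/*
The moves returned by Defragment() are applied elsewhere:
VmaBlockVector::ApplyDefragmentationMovesCpu() copies through mapped pointers and
ApplyDefragmentationMovesGpu() records vkCmdCopyBuffer. Conceptually, per move
(a sketch only; the real code batches and validates):

\code
// CPU path:
//   memcpy((char*)dstMapped + move.dstOffset, (char*)srcMapped + move.srcOffset, (size_t)move.size);
// GPU path:
//   VkBufferCopy region = { move.srcOffset, move.dstOffset, move.size };
//   vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, &region);
\endcode
*/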
7587 class VmaDefragmentationAlgorithm
7588 {
7589  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7590 public:
7591  VmaDefragmentationAlgorithm(
7592  VmaAllocator hAllocator,
7593  VmaBlockVector* pBlockVector,
7594  uint32_t currentFrameIndex) :
7595  m_hAllocator(hAllocator),
7596  m_pBlockVector(pBlockVector),
7597  m_CurrentFrameIndex(currentFrameIndex)
7598  {
7599  }
7600  virtual ~VmaDefragmentationAlgorithm()
7601  {
7602  }
7603 
7604  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7605  virtual void AddAll() = 0;
7606 
7607  virtual VkResult Defragment(
7608  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7609  VkDeviceSize maxBytesToMove,
7610  uint32_t maxAllocationsToMove,
7611  VmaDefragmentationFlags flags) = 0;
7612 
7613  virtual VkDeviceSize GetBytesMoved() const = 0;
7614  virtual uint32_t GetAllocationsMoved() const = 0;
7615 
7616 protected:
7617  VmaAllocator const m_hAllocator;
7618  VmaBlockVector* const m_pBlockVector;
7619  const uint32_t m_CurrentFrameIndex;
7620 
7621  struct AllocationInfo
7622  {
7623  VmaAllocation m_hAllocation;
7624  VkBool32* m_pChanged;
7625 
7626  AllocationInfo() :
7627  m_hAllocation(VK_NULL_HANDLE),
7628  m_pChanged(VMA_NULL)
7629  {
7630  }
7631  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7632  m_hAllocation(hAlloc),
7633  m_pChanged(pChanged)
7634  {
7635  }
7636  };
7637 };
7638 
7639 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7640 {
7641  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7642 public:
7643  VmaDefragmentationAlgorithm_Generic(
7644  VmaAllocator hAllocator,
7645  VmaBlockVector* pBlockVector,
7646  uint32_t currentFrameIndex,
7647  bool overlappingMoveSupported);
7648  virtual ~VmaDefragmentationAlgorithm_Generic();
7649 
7650  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7651  virtual void AddAll() { m_AllAllocations = true; }
7652 
7653  virtual VkResult Defragment(
7654  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7655  VkDeviceSize maxBytesToMove,
7656  uint32_t maxAllocationsToMove,
7657  VmaDefragmentationFlags flags);
7658 
7659  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7660  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7661 
7662 private:
7663  uint32_t m_AllocationCount;
7664  bool m_AllAllocations;
7665 
7666  VkDeviceSize m_BytesMoved;
7667  uint32_t m_AllocationsMoved;
7668 
7669  struct AllocationInfoSizeGreater
7670  {
7671  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7672  {
7673  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7674  }
7675  };
7676 
7677  struct AllocationInfoOffsetGreater
7678  {
7679  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7680  {
7681  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7682  }
7683  };
7684 
7685  struct BlockInfo
7686  {
7687  size_t m_OriginalBlockIndex;
7688  VmaDeviceMemoryBlock* m_pBlock;
7689  bool m_HasNonMovableAllocations;
7690  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7691 
7692  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7693  m_OriginalBlockIndex(SIZE_MAX),
7694  m_pBlock(VMA_NULL),
7695  m_HasNonMovableAllocations(true),
7696  m_Allocations(pAllocationCallbacks)
7697  {
7698  }
7699 
7700  void CalcHasNonMovableAllocations()
7701  {
7702  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7703  const size_t defragmentAllocCount = m_Allocations.size();
7704  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7705  }
7706 
7707  void SortAllocationsBySizeDescending()
7708  {
7709  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7710  }
7711 
7712  void SortAllocationsByOffsetDescending()
7713  {
7714  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7715  }
7716  };
7717 
7718  struct BlockPointerLess
7719  {
7720  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7721  {
7722  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7723  }
7724  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7725  {
7726  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7727  }
7728  };
7729 
7730  // 1. Blocks with some non-movable allocations go first.
7731  // 2. Blocks with smaller sumFreeSize go first.
7732  struct BlockInfoCompareMoveDestination
7733  {
7734  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7735  {
7736  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7737  {
7738  return true;
7739  }
7740  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7741  {
7742  return false;
7743  }
7744  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7745  {
7746  return true;
7747  }
7748  return false;
7749  }
7750  };
7751 
7752  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7753  BlockInfoVector m_Blocks;
7754 
7755  VkResult DefragmentRound(
7756  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7757  VkDeviceSize maxBytesToMove,
7758  uint32_t maxAllocationsToMove,
7759  bool freeOldAllocations);
7760 
7761  size_t CalcBlocksWithNonMovableCount() const;
7762 
7763  static bool MoveMakesSense(
7764  size_t dstBlockIndex, VkDeviceSize dstOffset,
7765  size_t srcBlockIndex, VkDeviceSize srcOffset);
7766 };
7767 
7768 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7769 {
7770  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7771 public:
7772  VmaDefragmentationAlgorithm_Fast(
7773  VmaAllocator hAllocator,
7774  VmaBlockVector* pBlockVector,
7775  uint32_t currentFrameIndex,
7776  bool overlappingMoveSupported);
7777  virtual ~VmaDefragmentationAlgorithm_Fast();
7778 
7779  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7780  virtual void AddAll() { m_AllAllocations = true; }
7781 
7782  virtual VkResult Defragment(
7783  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7784  VkDeviceSize maxBytesToMove,
7785  uint32_t maxAllocationsToMove,
7786  VmaDefragmentationFlags flags);
7787 
7788  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7789  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7790 
7791 private:
7792  struct BlockInfo
7793  {
7794  size_t origBlockIndex;
7795  };
7796 
7797  class FreeSpaceDatabase
7798  {
7799  public:
7800  FreeSpaceDatabase()
7801  {
7802  FreeSpace s = {};
7803  s.blockInfoIndex = SIZE_MAX;
7804  for(size_t i = 0; i < MAX_COUNT; ++i)
7805  {
7806  m_FreeSpaces[i] = s;
7807  }
7808  }
7809 
7810  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7811  {
7812  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7813  {
7814  return;
7815  }
7816 
7817  // Find the first invalid structure or, failing that, the smallest one.
7818  size_t bestIndex = SIZE_MAX;
7819  for(size_t i = 0; i < MAX_COUNT; ++i)
7820  {
7821  // Empty structure.
7822  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7823  {
7824  bestIndex = i;
7825  break;
7826  }
7827  if(m_FreeSpaces[i].size < size &&
7828  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7829  {
7830  bestIndex = i;
7831  }
7832  }
7833 
7834  if(bestIndex != SIZE_MAX)
7835  {
7836  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7837  m_FreeSpaces[bestIndex].offset = offset;
7838  m_FreeSpaces[bestIndex].size = size;
7839  }
7840  }
7841 
7842  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7843  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7844  {
7845  size_t bestIndex = SIZE_MAX;
7846  VkDeviceSize bestFreeSpaceAfter = 0;
7847  for(size_t i = 0; i < MAX_COUNT; ++i)
7848  {
7849  // Structure is valid.
7850  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7851  {
7852  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7853  // Allocation fits into this structure.
7854  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7855  {
7856  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7857  (dstOffset + size);
7858  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7859  {
7860  bestIndex = i;
7861  bestFreeSpaceAfter = freeSpaceAfter;
7862  }
7863  }
7864  }
7865  }
7866 
7867  if(bestIndex != SIZE_MAX)
7868  {
7869  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7870  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7871 
7872  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7873  {
7874  // Leave this structure to represent the remaining empty space.
7875  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7876  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7877  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7878  }
7879  else
7880  {
7881  // This structure becomes invalid.
7882  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7883  }
7884 
7885  return true;
7886  }
7887 
7888  return false;
7889  }
7890 
7891  private:
7892  static const size_t MAX_COUNT = 4;
7893 
7894  struct FreeSpace
7895  {
7896  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7897  VkDeviceSize offset;
7898  VkDeviceSize size;
7899  } m_FreeSpaces[MAX_COUNT];
7900  };
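/*
A worked example of FreeSpaceDatabase behavior (numbers are illustrative): after
Register(blockInfoIndex = 0, offset = 256, size = 1024), a call to
Fetch(alignment = 64, size = 512) returns blockInfoIndex 0 with dstOffset 256 and,
because 512 bytes remain (>= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER), shrinks
the stored entry to { offset = 768, size = 512 }. At most MAX_COUNT (4) of the
largest seen free regions are remembered at a time.
*/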
7901 
7902  const bool m_OverlappingMoveSupported;
7903 
7904  uint32_t m_AllocationCount;
7905  bool m_AllAllocations;
7906 
7907  VkDeviceSize m_BytesMoved;
7908  uint32_t m_AllocationsMoved;
7909 
7910  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7911 
7912  void PreprocessMetadata();
7913  void PostprocessMetadata();
7914  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7915 };
7916 
7917 struct VmaBlockDefragmentationContext
7918 {
7919  enum BLOCK_FLAG
7920  {
7921  BLOCK_FLAG_USED = 0x00000001,
7922  };
7923  uint32_t flags;
7924  VkBuffer hBuffer;
7925 };
7926 
7927 class VmaBlockVectorDefragmentationContext
7928 {
7929  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7930 public:
7931  VkResult res;
7932  bool mutexLocked;
7933  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7934  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7935  uint32_t defragmentationMovesProcessed;
7936  uint32_t defragmentationMovesCommitted;
7937  bool hasDefragmentationPlan;
7938 
7939  VmaBlockVectorDefragmentationContext(
7940  VmaAllocator hAllocator,
7941  VmaPool hCustomPool, // Optional.
7942  VmaBlockVector* pBlockVector,
7943  uint32_t currFrameIndex);
7944  ~VmaBlockVectorDefragmentationContext();
7945 
7946  VmaPool GetCustomPool() const { return m_hCustomPool; }
7947  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7948  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7949 
7950  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7951  void AddAll() { m_AllAllocations = true; }
7952 
7953  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7954 
7955 private:
7956  const VmaAllocator m_hAllocator;
7957  // Null if not from custom pool.
7958  const VmaPool m_hCustomPool;
7959  // Redundant, kept for convenience so it does not have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7960  VmaBlockVector* const m_pBlockVector;
7961  const uint32_t m_CurrFrameIndex;
7962  // Owner of this object.
7963  VmaDefragmentationAlgorithm* m_pAlgorithm;
7964 
7965  struct AllocInfo
7966  {
7967  VmaAllocation hAlloc;
7968  VkBool32* pChanged;
7969  };
7970  // Used between constructor and Begin.
7971  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7972  bool m_AllAllocations;
7973 };
7974 
7975 struct VmaDefragmentationContext_T
7976 {
7977 private:
7978  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7979 public:
7980  VmaDefragmentationContext_T(
7981  VmaAllocator hAllocator,
7982  uint32_t currFrameIndex,
7983  uint32_t flags,
7984  VmaDefragmentationStats* pStats);
7985  ~VmaDefragmentationContext_T();
7986 
7987  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7988  void AddAllocations(
7989  uint32_t allocationCount,
7990  const VmaAllocation* pAllocations,
7991  VkBool32* pAllocationsChanged);
7992 
7993  /*
7994  Returns:
7995  - `VK_SUCCESS` if it succeeded and the object can be destroyed immediately.
7996  - `VK_NOT_READY` if it succeeded but the object must remain alive until vmaDefragmentationEnd().
7997  - Negative value if an error occurred and the object can be destroyed immediately.
7998  */
7999  VkResult Defragment(
8000  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
8001  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
8002  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
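/*
A sketch of the corresponding public-API flow that reaches this function through
vmaDefragmentationBegin() (illustrative variables; see the return values above):

\code
VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &defragStats, &defragCtx);
if(res == VK_NOT_READY)
{
    // GPU copies were recorded into defragInfo.commandBuffer:
    // submit it and wait before ending, e.g. vkQueueSubmit + vkQueueWaitIdle.
}
if(res == VK_SUCCESS || res == VK_NOT_READY)
{
    vmaDefragmentationEnd(allocator, defragCtx);
}
\endcode
*/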
8003 
8004  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
8005  VkResult DefragmentPassEnd();
8006 
8007 private:
8008  const VmaAllocator m_hAllocator;
8009  const uint32_t m_CurrFrameIndex;
8010  const uint32_t m_Flags;
8011  VmaDefragmentationStats* const m_pStats;
8012 
8013  VkDeviceSize m_MaxCpuBytesToMove;
8014  uint32_t m_MaxCpuAllocationsToMove;
8015  VkDeviceSize m_MaxGpuBytesToMove;
8016  uint32_t m_MaxGpuAllocationsToMove;
8017 
8018  // Owner of these objects.
8019  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
8020  // Owner of these objects.
8021  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
8022 };
8023 
8024 #if VMA_RECORDING_ENABLED
8025 
8026 class VmaRecorder
8027 {
8028 public:
8029  VmaRecorder();
8030  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
8031  void WriteConfiguration(
8032  const VkPhysicalDeviceProperties& devProps,
8033  const VkPhysicalDeviceMemoryProperties& memProps,
8034  uint32_t vulkanApiVersion,
8035  bool dedicatedAllocationExtensionEnabled,
8036  bool bindMemory2ExtensionEnabled,
8037  bool memoryBudgetExtensionEnabled,
8038  bool deviceCoherentMemoryExtensionEnabled);
8039  ~VmaRecorder();
8040 
8041  void RecordCreateAllocator(uint32_t frameIndex);
8042  void RecordDestroyAllocator(uint32_t frameIndex);
8043  void RecordCreatePool(uint32_t frameIndex,
8044  const VmaPoolCreateInfo& createInfo,
8045  VmaPool pool);
8046  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
8047  void RecordAllocateMemory(uint32_t frameIndex,
8048  const VkMemoryRequirements& vkMemReq,
8049  const VmaAllocationCreateInfo& createInfo,
8050  VmaAllocation allocation);
8051  void RecordAllocateMemoryPages(uint32_t frameIndex,
8052  const VkMemoryRequirements& vkMemReq,
8053  const VmaAllocationCreateInfo& createInfo,
8054  uint64_t allocationCount,
8055  const VmaAllocation* pAllocations);
8056  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
8057  const VkMemoryRequirements& vkMemReq,
8058  bool requiresDedicatedAllocation,
8059  bool prefersDedicatedAllocation,
8060  const VmaAllocationCreateInfo& createInfo,
8061  VmaAllocation allocation);
8062  void RecordAllocateMemoryForImage(uint32_t frameIndex,
8063  const VkMemoryRequirements& vkMemReq,
8064  bool requiresDedicatedAllocation,
8065  bool prefersDedicatedAllocation,
8066  const VmaAllocationCreateInfo& createInfo,
8067  VmaAllocation allocation);
8068  void RecordFreeMemory(uint32_t frameIndex,
8069  VmaAllocation allocation);
8070  void RecordFreeMemoryPages(uint32_t frameIndex,
8071  uint64_t allocationCount,
8072  const VmaAllocation* pAllocations);
8073  void RecordSetAllocationUserData(uint32_t frameIndex,
8074  VmaAllocation allocation,
8075  const void* pUserData);
8076  void RecordCreateLostAllocation(uint32_t frameIndex,
8077  VmaAllocation allocation);
8078  void RecordMapMemory(uint32_t frameIndex,
8079  VmaAllocation allocation);
8080  void RecordUnmapMemory(uint32_t frameIndex,
8081  VmaAllocation allocation);
8082  void RecordFlushAllocation(uint32_t frameIndex,
8083  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
8084  void RecordInvalidateAllocation(uint32_t frameIndex,
8085  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
8086  void RecordCreateBuffer(uint32_t frameIndex,
8087  const VkBufferCreateInfo& bufCreateInfo,
8088  const VmaAllocationCreateInfo& allocCreateInfo,
8089  VmaAllocation allocation);
8090  void RecordCreateImage(uint32_t frameIndex,
8091  const VkImageCreateInfo& imageCreateInfo,
8092  const VmaAllocationCreateInfo& allocCreateInfo,
8093  VmaAllocation allocation);
8094  void RecordDestroyBuffer(uint32_t frameIndex,
8095  VmaAllocation allocation);
8096  void RecordDestroyImage(uint32_t frameIndex,
8097  VmaAllocation allocation);
8098  void RecordTouchAllocation(uint32_t frameIndex,
8099  VmaAllocation allocation);
8100  void RecordGetAllocationInfo(uint32_t frameIndex,
8101  VmaAllocation allocation);
8102  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
8103  VmaPool pool);
8104  void RecordDefragmentationBegin(uint32_t frameIndex,
8105  const VmaDefragmentationInfo2& info,
8106  VmaDefragmentationContext ctx);
8107  void RecordDefragmentationEnd(uint32_t frameIndex,
8108  VmaDefragmentationContext ctx);
8109  void RecordSetPoolName(uint32_t frameIndex,
8110  VmaPool pool,
8111  const char* name);
8112 
8113 private:
8114  struct CallParams
8115  {
8116  uint32_t threadId;
8117  double time;
8118  };
8119 
8120  class UserDataString
8121  {
8122  public:
8123  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
8124  const char* GetString() const { return m_Str; }
8125 
8126  private:
8127  char m_PtrStr[17];
8128  const char* m_Str;
8129  };
8130 
8131  bool m_UseMutex;
8132  VmaRecordFlags m_Flags;
8133  FILE* m_File;
8134  VMA_MUTEX m_FileMutex;
8135  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
8136 
8137  void GetBasicParams(CallParams& outParams);
8138 
8139  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
8140  template<typename T>
8141  void PrintPointerList(uint64_t count, const T* pItems)
8142  {
8143  if(count)
8144  {
8145  fprintf(m_File, "%p", pItems[0]);
8146  for(uint64_t i = 1; i < count; ++i)
8147  {
8148  fprintf(m_File, " %p", pItems[i]);
8149  }
8150  }
8151  }
8152 
8153  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
8154  void Flush();
8155 };
8156 
8157 #endif // #if VMA_RECORDING_ENABLED
8158 
8159 /*
8160 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
8161 */
8162 class VmaAllocationObjectAllocator
8163 {
8164  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
8165 public:
8166  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
8167 
8168  template<typename... Types> VmaAllocation Allocate(Types... args);
8169  void Free(VmaAllocation hAlloc);
8170 
8171 private:
8172  VMA_MUTEX m_Mutex;
8173  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
8174 };
8175 
8176 struct VmaCurrentBudgetData
8177 {
8178  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
8179  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
8180 
8181 #if VMA_MEMORY_BUDGET
8182  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
8183  VMA_RW_MUTEX m_BudgetMutex;
8184  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
8185  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
8186  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
8187 #endif // #if VMA_MEMORY_BUDGET
8188 
8189  VmaCurrentBudgetData()
8190  {
8191  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
8192  {
8193  m_BlockBytes[heapIndex] = 0;
8194  m_AllocationBytes[heapIndex] = 0;
8195 #if VMA_MEMORY_BUDGET
8196  m_VulkanUsage[heapIndex] = 0;
8197  m_VulkanBudget[heapIndex] = 0;
8198  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
8199 #endif
8200  }
8201 
8202 #if VMA_MEMORY_BUDGET
8203  m_OperationsSinceBudgetFetch = 0;
8204 #endif
8205  }
8206 
8207  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
8208  {
8209  m_AllocationBytes[heapIndex] += allocationSize;
8210 #if VMA_MEMORY_BUDGET
8211  ++m_OperationsSinceBudgetFetch;
8212 #endif
8213  }
8214 
8215  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
8216  {
8217  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
8218  m_AllocationBytes[heapIndex] -= allocationSize;
8219 #if VMA_MEMORY_BUDGET
8220  ++m_OperationsSinceBudgetFetch;
8221 #endif
8222  }
8223 };
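/*
Between actual fetches of VK_EXT_memory_budget data, current heap usage can be
estimated from the atomically tracked block bytes. A sketch of the arithmetic
(the real accessor must also guard against the difference going negative):

\code
uint64_t estimatedUsage = m_VulkanUsage[heapIndex]
    + m_BlockBytes[heapIndex] - m_BlockBytesAtBudgetFetch[heapIndex];
\endcode
*/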
8224 
8225 // Main allocator object.
8226 struct VmaAllocator_T
8227 {
8228  VMA_CLASS_NO_COPY(VmaAllocator_T)
8229 public:
8230  bool m_UseMutex;
8231  uint32_t m_VulkanApiVersion;
8232  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
8233  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
8234  bool m_UseExtMemoryBudget;
8235  bool m_UseAmdDeviceCoherentMemory;
8236  bool m_UseKhrBufferDeviceAddress;
8237  bool m_UseExtMemoryPriority;
8238  VkDevice m_hDevice;
8239  VkInstance m_hInstance;
8240  bool m_AllocationCallbacksSpecified;
8241  VkAllocationCallbacks m_AllocationCallbacks;
8242  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
8243  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
8244 
8245  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the heap size can be allocated from it.
8246  uint32_t m_HeapSizeLimitMask;
8247 
8248  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
8249  VkPhysicalDeviceMemoryProperties m_MemProps;
8250 
8251  // Default pools.
8252  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
8253 
8254  typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
8255  DedicatedAllocationLinkedList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
8256  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
8257 
8258  VmaCurrentBudgetData m_Budget;
8259  VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
8260 
8261  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
8262  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
8263  ~VmaAllocator_T();
8264 
8265  const VkAllocationCallbacks* GetAllocationCallbacks() const
8266  {
8267  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
8268  }
8269  const VmaVulkanFunctions& GetVulkanFunctions() const
8270  {
8271  return m_VulkanFunctions;
8272  }
8273 
8274  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
8275 
8276  VkDeviceSize GetBufferImageGranularity() const
8277  {
8278  return VMA_MAX(
8279  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
8280  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
8281  }
8282 
8283  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
8284  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
8285 
8286  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
8287  {
8288  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
8289  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
8290  }
8291  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
8292  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
8293  {
8294  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
8295  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8296  }
8297  // Minimum alignment for all allocations in specific memory type.
8298  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
8299  {
8300  return IsMemoryTypeNonCoherent(memTypeIndex) ?
8301  VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
8302  (VkDeviceSize)VMA_MIN_ALIGNMENT;
8303  }
8304 
8305  bool IsIntegratedGpu() const
8306  {
8307  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
8308  }
8309 
8310  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
8311 
8312 #if VMA_RECORDING_ENABLED
8313  VmaRecorder* GetRecorder() const { return m_pRecorder; }
8314 #endif
8315 
8316  void GetBufferMemoryRequirements(
8317  VkBuffer hBuffer,
8318  VkMemoryRequirements& memReq,
8319  bool& requiresDedicatedAllocation,
8320  bool& prefersDedicatedAllocation) const;
8321  void GetImageMemoryRequirements(
8322  VkImage hImage,
8323  VkMemoryRequirements& memReq,
8324  bool& requiresDedicatedAllocation,
8325  bool& prefersDedicatedAllocation) const;
8326 
8327  // Main allocation function.
8328  VkResult AllocateMemory(
8329  const VkMemoryRequirements& vkMemReq,
8330  bool requiresDedicatedAllocation,
8331  bool prefersDedicatedAllocation,
8332  VkBuffer dedicatedBuffer,
8333  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
8334  VkImage dedicatedImage,
8335  const VmaAllocationCreateInfo& createInfo,
8336  VmaSuballocationType suballocType,
8337  size_t allocationCount,
8338  VmaAllocation* pAllocations);
8339 
8340  // Main deallocation function.
8341  void FreeMemory(
8342  size_t allocationCount,
8343  const VmaAllocation* pAllocations);
8344 
8345  void CalculateStats(VmaStats* pStats);
8346 
8347  void GetBudget(
8348  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
8349 
8350 #if VMA_STATS_STRING_ENABLED
8351  void PrintDetailedMap(class VmaJsonWriter& json);
8352 #endif
8353 
8354  VkResult DefragmentationBegin(
8355  const VmaDefragmentationInfo2& info,
8356  VmaDefragmentationStats* pStats,
8357  VmaDefragmentationContext* pContext);
8358  VkResult DefragmentationEnd(
8359  VmaDefragmentationContext context);
8360 
8361  VkResult DefragmentationPassBegin(
8362  VmaDefragmentationPassInfo* pInfo,
8363  VmaDefragmentationContext context);
8364  VkResult DefragmentationPassEnd(
8365  VmaDefragmentationContext context);
8366 
8367  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
8368  bool TouchAllocation(VmaAllocation hAllocation);
8369 
8370  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
8371  void DestroyPool(VmaPool pool);
8372  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
8373 
8374  void SetCurrentFrameIndex(uint32_t frameIndex);
8375  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
8376 
8377  void MakePoolAllocationsLost(
8378  VmaPool hPool,
8379  size_t* pLostAllocationCount);
8380  VkResult CheckPoolCorruption(VmaPool hPool);
8381  VkResult CheckCorruption(uint32_t memoryTypeBits);
8382 
8383  void CreateLostAllocation(VmaAllocation* pAllocation);
8384 
8385  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
8386  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
8387  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
8388  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
8389  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
8390  VkResult BindVulkanBuffer(
8391  VkDeviceMemory memory,
8392  VkDeviceSize memoryOffset,
8393  VkBuffer buffer,
8394  const void* pNext);
8395  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
8396  VkResult BindVulkanImage(
8397  VkDeviceMemory memory,
8398  VkDeviceSize memoryOffset,
8399  VkImage image,
8400  const void* pNext);
8401 
8402  VkResult Map(VmaAllocation hAllocation, void** ppData);
8403  void Unmap(VmaAllocation hAllocation);
8404 
8405  VkResult BindBufferMemory(
8406  VmaAllocation hAllocation,
8407  VkDeviceSize allocationLocalOffset,
8408  VkBuffer hBuffer,
8409  const void* pNext);
8410  VkResult BindImageMemory(
8411  VmaAllocation hAllocation,
8412  VkDeviceSize allocationLocalOffset,
8413  VkImage hImage,
8414  const void* pNext);
8415 
8416  VkResult FlushOrInvalidateAllocation(
8417  VmaAllocation hAllocation,
8418  VkDeviceSize offset, VkDeviceSize size,
8419  VMA_CACHE_OPERATION op);
8420  VkResult FlushOrInvalidateAllocations(
8421  uint32_t allocationCount,
8422  const VmaAllocation* allocations,
8423  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
8424  VMA_CACHE_OPERATION op);
8425 
8426  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
8427 
8428  /*
8429  Returns a bit mask of memory types that can support defragmentation on the GPU,
8430  because they allow creation of the buffer required for copy operations.
8431  */
8432  uint32_t GetGpuDefragmentationMemoryTypeBits();
8433 
8434 #if VMA_EXTERNAL_MEMORY
8435  VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
8436  {
8437  return m_TypeExternalMemoryHandleTypes[memTypeIndex];
8438  }
8439 #endif // #if VMA_EXTERNAL_MEMORY
8440 
8441 private:
8442  VkDeviceSize m_PreferredLargeHeapBlockSize;
8443 
8444  VkPhysicalDevice m_PhysicalDevice;
8445  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
8446  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
8447 #if VMA_EXTERNAL_MEMORY
8448  VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
8449 #endif // #if VMA_EXTERNAL_MEMORY
8450 
8451  VMA_RW_MUTEX m_PoolsMutex;
8452  typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
8453  // Protected by m_PoolsMutex.
8454  PoolList m_Pools;
8455  uint32_t m_NextPoolId;
8456 
8457  VmaVulkanFunctions m_VulkanFunctions;
8458 
8459  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
8460  uint32_t m_GlobalMemoryTypeBits;
8461 
8462 #if VMA_RECORDING_ENABLED
8463  VmaRecorder* m_pRecorder;
8464 #endif
8465 
8466  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
8467 
8468 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8469  void ImportVulkanFunctions_Static();
8470 #endif
8471 
8472  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
8473 
8474 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
8475  void ImportVulkanFunctions_Dynamic();
8476 #endif
8477 
8478  void ValidateVulkanFunctions();
8479 
8480  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
8481 
8482  VkResult AllocateMemoryOfType(
8483  VkDeviceSize size,
8484  VkDeviceSize alignment,
8485  bool dedicatedAllocation,
8486  VkBuffer dedicatedBuffer,
8487  VkBufferUsageFlags dedicatedBufferUsage,
8488  VkImage dedicatedImage,
8489  const VmaAllocationCreateInfo& createInfo,
8490  uint32_t memTypeIndex,
8491  VmaSuballocationType suballocType,
8492  size_t allocationCount,
8493  VmaAllocation* pAllocations);
8494 
8495  // Helper function only to be used inside AllocateDedicatedMemory.
8496  VkResult AllocateDedicatedMemoryPage(
8497  VkDeviceSize size,
8498  VmaSuballocationType suballocType,
8499  uint32_t memTypeIndex,
8500  const VkMemoryAllocateInfo& allocInfo,
8501  bool map,
8502  bool isUserDataString,
8503  void* pUserData,
8504  VmaAllocation* pAllocation);
8505 
8506  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
8507  VkResult AllocateDedicatedMemory(
8508  VkDeviceSize size,
8509  VmaSuballocationType suballocType,
8510  uint32_t memTypeIndex,
8511  bool withinBudget,
8512  bool map,
8513  bool isUserDataString,
8514  void* pUserData,
8515  float priority,
8516  VkBuffer dedicatedBuffer,
8517  VkBufferUsageFlags dedicatedBufferUsage,
8518  VkImage dedicatedImage,
8519  size_t allocationCount,
8520  VmaAllocation* pAllocations);
8521 
8522  void FreeDedicatedMemory(const VmaAllocation allocation);
8523 
8524  /*
8525  Calculates and returns a bit mask of memory types that can support defragmentation
8526  on the GPU, because they allow creation of the buffer required for copy operations.
8527  */
8528  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
8529 
8530  uint32_t CalculateGlobalMemoryTypeBits() const;
8531 
8532  bool GetFlushOrInvalidateRange(
8533  VmaAllocation allocation,
8534  VkDeviceSize offset, VkDeviceSize size,
8535  VkMappedMemoryRange& outRange) const;
8536 
8537 #if VMA_MEMORY_BUDGET
8538  void UpdateVulkanBudget();
8539 #endif // #if VMA_MEMORY_BUDGET
8540 };
8541 
8542 ////////////////////////////////////////////////////////////////////////////////
8543 // Memory allocation #2 after VmaAllocator_T definition
8544 
8545 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
8546 {
8547  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
8548 }
8549 
8550 static void VmaFree(VmaAllocator hAllocator, void* ptr)
8551 {
8552  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
8553 }
8554 
8555 template<typename T>
8556 static T* VmaAllocate(VmaAllocator hAllocator)
8557 {
8558  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
8559 }
8560 
8561 template<typename T>
8562 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
8563 {
8564  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
8565 }
8566 
8567 template<typename T>
8568 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8569 {
8570  if(ptr != VMA_NULL)
8571  {
8572  ptr->~T();
8573  VmaFree(hAllocator, ptr);
8574  }
8575 }
8576 
8577 template<typename T>
8578 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
8579 {
8580  if(ptr != VMA_NULL)
8581  {
8582  for(size_t i = count; i--; )
8583  ptr[i].~T();
8584  VmaFree(hAllocator, ptr);
8585  }
8586 }
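// [Editorial sketch, not part of the original header] These helpers hand out
// raw, suitably aligned storage, so object lifetime is managed explicitly:
// construct with placement-new, destroy through vma_delete(), which runs the
// destructor and then releases the bytes via VmaFree(). A minimal hedged
// example with a hypothetical trivial type:
//
//     struct ExamplePayload { uint32_t value; };
//
//     void ExampleUse(VmaAllocator hAllocator)
//     {
//         ExamplePayload* const p =
//             new(VmaAllocate<ExamplePayload>(hAllocator)) ExamplePayload{42u};
//         vma_delete(hAllocator, p); // Runs ~ExamplePayload(), then VmaFree().
//     }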
8587 
8588 ////////////////////////////////////////////////////////////////////////////////
8589 // VmaStringBuilder
8590 
8591 #if VMA_STATS_STRING_ENABLED
8592 
8593 class VmaStringBuilder
8594 {
8595 public:
8596  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8597  size_t GetLength() const { return m_Data.size(); }
8598  const char* GetData() const { return m_Data.data(); }
8599 
8600  void Add(char ch) { m_Data.push_back(ch); }
8601  void Add(const char* pStr);
8602  void AddNewLine() { Add('\n'); }
8603  void AddNumber(uint32_t num);
8604  void AddNumber(uint64_t num);
8605  void AddPointer(const void* ptr);
8606 
8607 private:
8608  VmaVector< char, VmaStlAllocator<char> > m_Data;
8609 };
8610 
8611 void VmaStringBuilder::Add(const char* pStr)
8612 {
8613  const size_t strLen = strlen(pStr);
8614  if(strLen > 0)
8615  {
8616  const size_t oldCount = m_Data.size();
8617  m_Data.resize(oldCount + strLen);
8618  memcpy(m_Data.data() + oldCount, pStr, strLen);
8619  }
8620 }
8621 
8622 void VmaStringBuilder::AddNumber(uint32_t num)
8623 {
8624  char buf[11];
8625  buf[10] = '\0';
8626  char *p = &buf[10];
8627  do
8628  {
8629  *--p = '0' + (num % 10);
8630  num /= 10;
8631  }
8632  while(num);
8633  Add(p);
8634 }
8635 
8636 void VmaStringBuilder::AddNumber(uint64_t num)
8637 {
8638  char buf[21];
8639  buf[20] = '\0';
8640  char *p = &buf[20];
8641  do
8642  {
8643  *--p = '0' + (num % 10);
8644  num /= 10;
8645  }
8646  while(num);
8647  Add(p);
8648 }
8649 
8650 void VmaStringBuilder::AddPointer(const void* ptr)
8651 {
8652  char buf[21];
8653  VmaPtrToStr(buf, sizeof(buf), ptr);
8654  Add(buf);
8655 }
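// [Editorial sketch, not part of the original header] Typical use of
// VmaStringBuilder: append pieces, then read the buffer back. Note the buffer
// is not NUL-terminated; callers are expected to pair GetData() with
// GetLength(), as the JSON writer below does.
//
//     VmaStringBuilder sb(allocator); // 'allocator' is an assumed VmaAllocator.
//     sb.Add("Heap ");
//     sb.AddNumber(2u);   // Selects the uint32_t overload.
//     sb.AddNewLine();
//     // Consume the result as the pair (sb.GetData(), sb.GetLength()).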
8656 
8657 #endif // #if VMA_STATS_STRING_ENABLED
8658 
8659 ////////////////////////////////////////////////////////////////////////////////
8660 // VmaJsonWriter
8661 
8662 #if VMA_STATS_STRING_ENABLED
8663 
8664 class VmaJsonWriter
8665 {
8666  VMA_CLASS_NO_COPY(VmaJsonWriter)
8667 public:
8668  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8669  ~VmaJsonWriter();
8670 
8671  void BeginObject(bool singleLine = false);
8672  void EndObject();
8673 
8674  void BeginArray(bool singleLine = false);
8675  void EndArray();
8676 
8677  void WriteString(const char* pStr);
8678  void BeginString(const char* pStr = VMA_NULL);
8679  void ContinueString(const char* pStr);
8680  void ContinueString(uint32_t n);
8681  void ContinueString(uint64_t n);
8682  void ContinueString_Pointer(const void* ptr);
8683  void EndString(const char* pStr = VMA_NULL);
8684 
8685  void WriteNumber(uint32_t n);
8686  void WriteNumber(uint64_t n);
8687  void WriteBool(bool b);
8688  void WriteNull();
8689 
8690 private:
8691  static const char* const INDENT;
8692 
8693  enum COLLECTION_TYPE
8694  {
8695  COLLECTION_TYPE_OBJECT,
8696  COLLECTION_TYPE_ARRAY,
8697  };
8698  struct StackItem
8699  {
8700  COLLECTION_TYPE type;
8701  uint32_t valueCount;
8702  bool singleLineMode;
8703  };
8704 
8705  VmaStringBuilder& m_SB;
8706  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8707  bool m_InsideString;
8708 
8709  void BeginValue(bool isString);
8710  void WriteIndent(bool oneLess = false);
8711 };
8712 
8713 const char* const VmaJsonWriter::INDENT = " ";
8714 
8715 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8716  m_SB(sb),
8717  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8718  m_InsideString(false)
8719 {
8720 }
8721 
8722 VmaJsonWriter::~VmaJsonWriter()
8723 {
8724  VMA_ASSERT(!m_InsideString);
8725  VMA_ASSERT(m_Stack.empty());
8726 }
8727 
8728 void VmaJsonWriter::BeginObject(bool singleLine)
8729 {
8730  VMA_ASSERT(!m_InsideString);
8731 
8732  BeginValue(false);
8733  m_SB.Add('{');
8734 
8735  StackItem item;
8736  item.type = COLLECTION_TYPE_OBJECT;
8737  item.valueCount = 0;
8738  item.singleLineMode = singleLine;
8739  m_Stack.push_back(item);
8740 }
8741 
8742 void VmaJsonWriter::EndObject()
8743 {
8744  VMA_ASSERT(!m_InsideString);
8745 
8746  WriteIndent(true);
8747  m_SB.Add('}');
8748 
8749  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8750  m_Stack.pop_back();
8751 }
8752 
8753 void VmaJsonWriter::BeginArray(bool singleLine)
8754 {
8755  VMA_ASSERT(!m_InsideString);
8756 
8757  BeginValue(false);
8758  m_SB.Add('[');
8759 
8760  StackItem item;
8761  item.type = COLLECTION_TYPE_ARRAY;
8762  item.valueCount = 0;
8763  item.singleLineMode = singleLine;
8764  m_Stack.push_back(item);
8765 }
8766 
8767 void VmaJsonWriter::EndArray()
8768 {
8769  VMA_ASSERT(!m_InsideString);
8770 
8771  WriteIndent(true);
8772  m_SB.Add(']');
8773 
8774  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8775  m_Stack.pop_back();
8776 }
8777 
8778 void VmaJsonWriter::WriteString(const char* pStr)
8779 {
8780  BeginString(pStr);
8781  EndString();
8782 }
8783 
8784 void VmaJsonWriter::BeginString(const char* pStr)
8785 {
8786  VMA_ASSERT(!m_InsideString);
8787 
8788  BeginValue(true);
8789  m_SB.Add('"');
8790  m_InsideString = true;
8791  if(pStr != VMA_NULL && pStr[0] != '\0')
8792  {
8793  ContinueString(pStr);
8794  }
8795 }
8796 
8797 void VmaJsonWriter::ContinueString(const char* pStr)
8798 {
8799  VMA_ASSERT(m_InsideString);
8800 
8801  const size_t strLen = strlen(pStr);
8802  for(size_t i = 0; i < strLen; ++i)
8803  {
8804  char ch = pStr[i];
8805  if(ch == '\\')
8806  {
8807  m_SB.Add("\\\\");
8808  }
8809  else if(ch == '"')
8810  {
8811  m_SB.Add("\\\"");
8812  }
8813  else if(ch >= 32)
8814  {
8815  m_SB.Add(ch);
8816  }
8817  else switch(ch)
8818  {
8819  case '\b':
8820  m_SB.Add("\\b");
8821  break;
8822  case '\f':
8823  m_SB.Add("\\f");
8824  break;
8825  case '\n':
8826  m_SB.Add("\\n");
8827  break;
8828  case '\r':
8829  m_SB.Add("\\r");
8830  break;
8831  case '\t':
8832  m_SB.Add("\\t");
8833  break;
8834  default:
8835  VMA_ASSERT(0 && "Character not currently supported.");
8836  break;
8837  }
8838  }
8839 }
8840 
8841 void VmaJsonWriter::ContinueString(uint32_t n)
8842 {
8843  VMA_ASSERT(m_InsideString);
8844  m_SB.AddNumber(n);
8845 }
8846 
8847 void VmaJsonWriter::ContinueString(uint64_t n)
8848 {
8849  VMA_ASSERT(m_InsideString);
8850  m_SB.AddNumber(n);
8851 }
8852 
8853 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8854 {
8855  VMA_ASSERT(m_InsideString);
8856  m_SB.AddPointer(ptr);
8857 }
8858 
8859 void VmaJsonWriter::EndString(const char* pStr)
8860 {
8861  VMA_ASSERT(m_InsideString);
8862  if(pStr != VMA_NULL && pStr[0] != '\0')
8863  {
8864  ContinueString(pStr);
8865  }
8866  m_SB.Add('"');
8867  m_InsideString = false;
8868 }
8869 
8870 void VmaJsonWriter::WriteNumber(uint32_t n)
8871 {
8872  VMA_ASSERT(!m_InsideString);
8873  BeginValue(false);
8874  m_SB.AddNumber(n);
8875 }
8876 
8877 void VmaJsonWriter::WriteNumber(uint64_t n)
8878 {
8879  VMA_ASSERT(!m_InsideString);
8880  BeginValue(false);
8881  m_SB.AddNumber(n);
8882 }
8883 
8884 void VmaJsonWriter::WriteBool(bool b)
8885 {
8886  VMA_ASSERT(!m_InsideString);
8887  BeginValue(false);
8888  m_SB.Add(b ? "true" : "false");
8889 }
8890 
8891 void VmaJsonWriter::WriteNull()
8892 {
8893  VMA_ASSERT(!m_InsideString);
8894  BeginValue(false);
8895  m_SB.Add("null");
8896 }
8897 
8898 void VmaJsonWriter::BeginValue(bool isString)
8899 {
8900  if(!m_Stack.empty())
8901  {
8902  StackItem& currItem = m_Stack.back();
8903  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8904  currItem.valueCount % 2 == 0)
8905  {
8906  VMA_ASSERT(isString);
8907  }
8908 
8909  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8910  currItem.valueCount % 2 != 0)
8911  {
8912  m_SB.Add(": ");
8913  }
8914  else if(currItem.valueCount > 0)
8915  {
8916  m_SB.Add(", ");
8917  WriteIndent();
8918  }
8919  else
8920  {
8921  WriteIndent();
8922  }
8923  ++currItem.valueCount;
8924  }
8925 }
8926 
8927 void VmaJsonWriter::WriteIndent(bool oneLess)
8928 {
8929  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8930  {
8931  m_SB.AddNewLine();
8932 
8933  size_t count = m_Stack.size();
8934  if(count > 0 && oneLess)
8935  {
8936  --count;
8937  }
8938  for(size_t i = 0; i < count; ++i)
8939  {
8940  m_SB.Add(INDENT);
8941  }
8942  }
8943 }
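// [Editorial sketch, not part of the original header] How the writer above is
// driven: inside an object, calls alternate name/value, which is exactly what
// the valueCount % 2 bookkeeping in BeginValue() asserts. A hedged usage
// example ('callbacks' is an assumed VkAllocationCallbacks pointer):
//
//     VmaStringBuilder sb(allocator);
//     {
//         VmaJsonWriter json(callbacks, sb);
//         json.BeginObject();
//         json.WriteString("Count"); // Name: even valueCount, must be a string.
//         json.WriteNumber(3u);      // Value: BeginValue() emits ": " first.
//         json.WriteString("Items");
//         json.BeginArray(true);     // singleLine = true: no newlines/indent.
//         json.WriteNumber(1u);
//         json.WriteNumber(2u);
//         json.EndArray();
//         json.EndObject();
//     } // ~VmaJsonWriter() asserts the object/array stack is balanced.
//     // Produces roughly: {"Count": 3, "Items": [1, 2]} plus indentation.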
8944 
8945 #endif // #if VMA_STATS_STRING_ENABLED
8946 
8947 ////////////////////////////////////////////////////////////////////////////////
8948 
8949 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8950 {
8951  if(IsUserDataString())
8952  {
8953  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8954 
8955  FreeUserDataString(hAllocator);
8956 
8957  if(pUserData != VMA_NULL)
8958  {
8959  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8960  }
8961  }
8962  else
8963  {
8964  m_pUserData = pUserData;
8965  }
8966 }
8967 
8968 void VmaAllocation_T::ChangeBlockAllocation(
8969  VmaAllocator hAllocator,
8970  VmaDeviceMemoryBlock* block,
8971  VkDeviceSize offset)
8972 {
8973  VMA_ASSERT(block != VMA_NULL);
8974  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8975 
8976  // Move mapping reference counter from old block to new block.
8977  if(block != m_BlockAllocation.m_Block)
8978  {
8979  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8980  if(IsPersistentMap())
8981  ++mapRefCount;
8982  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8983  block->Map(hAllocator, mapRefCount, VMA_NULL);
8984  }
8985 
8986  m_BlockAllocation.m_Block = block;
8987  m_BlockAllocation.m_Offset = offset;
8988 }
8989 
8990 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8991 {
8992  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8993  m_BlockAllocation.m_Offset = newOffset;
8994 }
8995 
8996 VkDeviceSize VmaAllocation_T::GetOffset() const
8997 {
8998  switch(m_Type)
8999  {
9000  case ALLOCATION_TYPE_BLOCK:
9001  return m_BlockAllocation.m_Offset;
9002  case ALLOCATION_TYPE_DEDICATED:
9003  return 0;
9004  default:
9005  VMA_ASSERT(0);
9006  return 0;
9007  }
9008 }
9009 
9010 VkDeviceMemory VmaAllocation_T::GetMemory() const
9011 {
9012  switch(m_Type)
9013  {
9014  case ALLOCATION_TYPE_BLOCK:
9015  return m_BlockAllocation.m_Block->GetDeviceMemory();
9016  case ALLOCATION_TYPE_DEDICATED:
9017  return m_DedicatedAllocation.m_hMemory;
9018  default:
9019  VMA_ASSERT(0);
9020  return VK_NULL_HANDLE;
9021  }
9022 }
9023 
9024 void* VmaAllocation_T::GetMappedData() const
9025 {
9026  switch(m_Type)
9027  {
9028  case ALLOCATION_TYPE_BLOCK:
9029  if(m_MapCount != 0)
9030  {
9031  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
9032  VMA_ASSERT(pBlockData != VMA_NULL);
9033  return (char*)pBlockData + m_BlockAllocation.m_Offset;
9034  }
9035  else
9036  {
9037  return VMA_NULL;
9038  }
9039  break;
9040  case ALLOCATION_TYPE_DEDICATED:
9041  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
9042  return m_DedicatedAllocation.m_pMappedData;
9043  default:
9044  VMA_ASSERT(0);
9045  return VMA_NULL;
9046  }
9047 }
9048 
9049 bool VmaAllocation_T::CanBecomeLost() const
9050 {
9051  switch(m_Type)
9052  {
9053  case ALLOCATION_TYPE_BLOCK:
9054  return m_BlockAllocation.m_CanBecomeLost;
9055  case ALLOCATION_TYPE_DEDICATED:
9056  return false;
9057  default:
9058  VMA_ASSERT(0);
9059  return false;
9060  }
9061 }
9062 
9063 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9064 {
9065  VMA_ASSERT(CanBecomeLost());
9066 
9067  /*
9068  Warning: This is a carefully designed algorithm.
9069  Do not modify unless you really know what you're doing :)
9070  */
9071  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
9072  for(;;)
9073  {
9074  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
9075  {
9076  VMA_ASSERT(0);
9077  return false;
9078  }
9079  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
9080  {
9081  return false;
9082  }
9083  else // Last use time earlier than current time.
9084  {
9085  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
9086  {
9087  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
9088  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
9089  return true;
9090  }
9091  }
9092  }
9093 }
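// [Editorial sketch, not part of the original header] The loop above is the
// classic lock-free compare-exchange retry pattern: read, test, attempt the
// swap, and on failure re-test with the freshly observed value. The same idea
// with plain std::atomic (VMA_ATOMIC_UINT32 is std::atomic<uint32_t> by
// default; VMA additionally asserts the value was not already LOST):
//
//     bool TryMarkLost(std::atomic<uint32_t>& lastUseFrame,
//         uint32_t currFrame, uint32_t frameInUseCount, uint32_t lostMarker)
//     {
//         uint32_t observed = lastUseFrame.load();
//         for(;;)
//         {
//             if(observed + frameInUseCount >= currFrame)
//                 return false; // Possibly still in use by the GPU: give up.
//             // On failure 'observed' is refreshed, and the loop re-tests.
//             if(lastUseFrame.compare_exchange_weak(observed, lostMarker))
//                 return true;  // Won the race: allocation is now lost.
//         }
//     }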
9094 
9095 #if VMA_STATS_STRING_ENABLED
9096 
9097 // Names correspond to the values of enum VmaSuballocationType.
9098 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
9099  "FREE",
9100  "UNKNOWN",
9101  "BUFFER",
9102  "IMAGE_UNKNOWN",
9103  "IMAGE_LINEAR",
9104  "IMAGE_OPTIMAL",
9105 };
9106 
9107 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
9108 {
9109  json.WriteString("Type");
9110  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
9111 
9112  json.WriteString("Size");
9113  json.WriteNumber(m_Size);
9114 
9115  if(m_pUserData != VMA_NULL)
9116  {
9117  json.WriteString("UserData");
9118  if(IsUserDataString())
9119  {
9120  json.WriteString((const char*)m_pUserData);
9121  }
9122  else
9123  {
9124  json.BeginString();
9125  json.ContinueString_Pointer(m_pUserData);
9126  json.EndString();
9127  }
9128  }
9129 
9130  json.WriteString("CreationFrameIndex");
9131  json.WriteNumber(m_CreationFrameIndex);
9132 
9133  json.WriteString("LastUseFrameIndex");
9134  json.WriteNumber(GetLastUseFrameIndex());
9135 
9136  if(m_BufferImageUsage != 0)
9137  {
9138  json.WriteString("Usage");
9139  json.WriteNumber(m_BufferImageUsage);
9140  }
9141 }
9142 
9143 #endif
9144 
9145 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
9146 {
9147  VMA_ASSERT(IsUserDataString());
9148  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
9149  m_pUserData = VMA_NULL;
9150 }
9151 
9152 void VmaAllocation_T::BlockAllocMap()
9153 {
9154  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
9155 
9156  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
9157  {
9158  ++m_MapCount;
9159  }
9160  else
9161  {
9162  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
9163  }
9164 }
9165 
9166 void VmaAllocation_T::BlockAllocUnmap()
9167 {
9168  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
9169 
9170  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
9171  {
9172  --m_MapCount;
9173  }
9174  else
9175  {
9176  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
9177  }
9178 }
9179 
9180 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
9181 {
9182  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
9183 
9184  if(m_MapCount != 0)
9185  {
9186  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
9187  {
9188  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
9189  *ppData = m_DedicatedAllocation.m_pMappedData;
9190  ++m_MapCount;
9191  return VK_SUCCESS;
9192  }
9193  else
9194  {
9195  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
9196  return VK_ERROR_MEMORY_MAP_FAILED;
9197  }
9198  }
9199  else
9200  {
9201  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9202  hAllocator->m_hDevice,
9203  m_DedicatedAllocation.m_hMemory,
9204  0, // offset
9205  VK_WHOLE_SIZE,
9206  0, // flags
9207  ppData);
9208  if(result == VK_SUCCESS)
9209  {
9210  m_DedicatedAllocation.m_pMappedData = *ppData;
9211  m_MapCount = 1;
9212  }
9213  return result;
9214  }
9215 }
9216 
9217 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
9218 {
9219  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
9220 
9221  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
9222  {
9223  --m_MapCount;
9224  if(m_MapCount == 0)
9225  {
9226  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
9227  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
9228  hAllocator->m_hDevice,
9229  m_DedicatedAllocation.m_hMemory);
9230  }
9231  }
9232  else
9233  {
9234  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
9235  }
9236 }
9237 
9238 #if VMA_STATS_STRING_ENABLED
9239 
9240 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
9241 {
9242  json.BeginObject();
9243 
9244  json.WriteString("Blocks");
9245  json.WriteNumber(stat.blockCount);
9246 
9247  json.WriteString("Allocations");
9248  json.WriteNumber(stat.allocationCount);
9249 
9250  json.WriteString("UnusedRanges");
9251  json.WriteNumber(stat.unusedRangeCount);
9252 
9253  json.WriteString("UsedBytes");
9254  json.WriteNumber(stat.usedBytes);
9255 
9256  json.WriteString("UnusedBytes");
9257  json.WriteNumber(stat.unusedBytes);
9258 
9259  if(stat.allocationCount > 1)
9260  {
9261  json.WriteString("AllocationSize");
9262  json.BeginObject(true);
9263  json.WriteString("Min");
9264  json.WriteNumber(stat.allocationSizeMin);
9265  json.WriteString("Avg");
9266  json.WriteNumber(stat.allocationSizeAvg);
9267  json.WriteString("Max");
9268  json.WriteNumber(stat.allocationSizeMax);
9269  json.EndObject();
9270  }
9271 
9272  if(stat.unusedRangeCount > 1)
9273  {
9274  json.WriteString("UnusedRangeSize");
9275  json.BeginObject(true);
9276  json.WriteString("Min");
9277  json.WriteNumber(stat.unusedRangeSizeMin);
9278  json.WriteString("Avg");
9279  json.WriteNumber(stat.unusedRangeSizeAvg);
9280  json.WriteString("Max");
9281  json.WriteNumber(stat.unusedRangeSizeMax);
9282  json.EndObject();
9283  }
9284 
9285  json.EndObject();
9286 }
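// [Editorial sketch, not part of the original header] Roughly the shape that
// VmaPrintStatInfo() emits for one VmaStatInfo, with placeholder numbers; the
// two nested size objects appear only when the respective count exceeds 1:
//
//     {
//         "Blocks": 2,
//         "Allocations": 10,
//         "UnusedRanges": 3,
//         "UsedBytes": 1048576,
//         "UnusedBytes": 65536,
//         "AllocationSize": {"Min": 256, "Avg": 104857, "Max": 524288},
//         "UnusedRangeSize": {"Min": 4096, "Avg": 21845, "Max": 32768}
//     }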
9287 
9288 #endif // #if VMA_STATS_STRING_ENABLED
9289 
9290 struct VmaSuballocationItemSizeLess
9291 {
9292  bool operator()(
9293  const VmaSuballocationList::iterator lhs,
9294  const VmaSuballocationList::iterator rhs) const
9295  {
9296  return lhs->size < rhs->size;
9297  }
9298  bool operator()(
9299  const VmaSuballocationList::iterator lhs,
9300  VkDeviceSize rhsSize) const
9301  {
9302  return lhs->size < rhsSize;
9303  }
9304 };
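// [Editorial note, not part of the original header] The two operator()
// overloads let the same functor serve both iterator-vs-iterator comparisons
// (keeping the by-size vector sorted on insert) and iterator-vs-VkDeviceSize
// comparisons (binary search by a raw size with no dummy list node), e.g.:
//
//     // First registered free range whose size is >= requiredSize
//     // ('vec' stands for a vector of free-suballocation iterators):
//     VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
//         vec.data(), vec.data() + vec.size(),
//         requiredSize, VmaSuballocationItemSizeLess());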
9305 
9306 
9307 ////////////////////////////////////////////////////////////////////////////////
9308 // class VmaBlockMetadata
9309 
9310 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
9311  m_Size(0),
9312  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
9313 {
9314 }
9315 
9316 #if VMA_STATS_STRING_ENABLED
9317 
9318 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
9319  VkDeviceSize unusedBytes,
9320  size_t allocationCount,
9321  size_t unusedRangeCount) const
9322 {
9323  json.BeginObject();
9324 
9325  json.WriteString("TotalBytes");
9326  json.WriteNumber(GetSize());
9327 
9328  json.WriteString("UnusedBytes");
9329  json.WriteNumber(unusedBytes);
9330 
9331  json.WriteString("Allocations");
9332  json.WriteNumber((uint64_t)allocationCount);
9333 
9334  json.WriteString("UnusedRanges");
9335  json.WriteNumber((uint64_t)unusedRangeCount);
9336 
9337  json.WriteString("Suballocations");
9338  json.BeginArray();
9339 }
9340 
9341 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
9342  VkDeviceSize offset,
9343  VmaAllocation hAllocation) const
9344 {
9345  json.BeginObject(true);
9346 
9347  json.WriteString("Offset");
9348  json.WriteNumber(offset);
9349 
9350  hAllocation->PrintParameters(json);
9351 
9352  json.EndObject();
9353 }
9354 
9355 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
9356  VkDeviceSize offset,
9357  VkDeviceSize size) const
9358 {
9359  json.BeginObject(true);
9360 
9361  json.WriteString("Offset");
9362  json.WriteNumber(offset);
9363 
9364  json.WriteString("Type");
9365  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
9366 
9367  json.WriteString("Size");
9368  json.WriteNumber(size);
9369 
9370  json.EndObject();
9371 }
9372 
9373 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
9374 {
9375  json.EndArray();
9376  json.EndObject();
9377 }
9378 
9379 #endif // #if VMA_STATS_STRING_ENABLED
9380 
9381 ////////////////////////////////////////////////////////////////////////////////
9382 // class VmaBlockMetadata_Generic
9383 
9384 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
9385  VmaBlockMetadata(hAllocator),
9386  m_FreeCount(0),
9387  m_SumFreeSize(0),
9388  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9389  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
9390 {
9391 }
9392 
9393 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
9394 {
9395 }
9396 
9397 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
9398 {
9399  VmaBlockMetadata::Init(size);
9400 
9401  m_FreeCount = 1;
9402  m_SumFreeSize = size;
9403 
9404  VmaSuballocation suballoc = {};
9405  suballoc.offset = 0;
9406  suballoc.size = size;
9407  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9408  suballoc.hAllocation = VK_NULL_HANDLE;
9409 
9410  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9411  m_Suballocations.push_back(suballoc);
9412  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
9413  --suballocItem;
9414  m_FreeSuballocationsBySize.push_back(suballocItem);
9415 }
9416 
9417 bool VmaBlockMetadata_Generic::Validate() const
9418 {
9419  VMA_VALIDATE(!m_Suballocations.empty());
9420 
9421  // Expected offset of new suballocation as calculated from previous ones.
9422  VkDeviceSize calculatedOffset = 0;
9423  // Expected number of free suballocations as calculated from traversing their list.
9424  uint32_t calculatedFreeCount = 0;
9425  // Expected sum size of free suballocations as calculated from traversing their list.
9426  VkDeviceSize calculatedSumFreeSize = 0;
9427  // Expected number of free suballocations that should be registered in
9428  // m_FreeSuballocationsBySize calculated from traversing their list.
9429  size_t freeSuballocationsToRegister = 0;
9430  // True if previous visited suballocation was free.
9431  bool prevFree = false;
9432 
9433  for(const auto& subAlloc : m_Suballocations)
9434  {
9435  // Actual offset of this suballocation doesn't match expected one.
9436  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
9437 
9438  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
9439  // Two adjacent free suballocations are invalid. They should be merged.
9440  VMA_VALIDATE(!prevFree || !currFree);
9441 
9442  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
9443 
9444  if(currFree)
9445  {
9446  calculatedSumFreeSize += subAlloc.size;
9447  ++calculatedFreeCount;
9448  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9449  {
9450  ++freeSuballocationsToRegister;
9451  }
9452 
9453  // Margin required between allocations - every free space must be at least that large.
9454  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
9455  }
9456  else
9457  {
9458  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
9459  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
9460 
9461  // Margin required between allocations - previous allocation must be free.
9462  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
9463  }
9464 
9465  calculatedOffset += subAlloc.size;
9466  prevFree = currFree;
9467  }
9468 
9469  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
9470  // match expected one.
9471  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
9472 
9473  VkDeviceSize lastSize = 0;
9474  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
9475  {
9476  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
9477 
9478  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
9479  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9480  // They must be sorted by size ascending.
9481  VMA_VALIDATE(suballocItem->size >= lastSize);
9482 
9483  lastSize = suballocItem->size;
9484  }
9485 
9486  // Check if totals match calculated values.
9487  VMA_VALIDATE(ValidateFreeSuballocationList());
9488  VMA_VALIDATE(calculatedOffset == GetSize());
9489  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
9490  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
9491 
9492  return true;
9493 }
9494 
9495 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
9496 {
9497  if(!m_FreeSuballocationsBySize.empty())
9498  {
9499  return m_FreeSuballocationsBySize.back()->size;
9500  }
9501  else
9502  {
9503  return 0;
9504  }
9505 }
9506 
9507 bool VmaBlockMetadata_Generic::IsEmpty() const
9508 {
9509  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
9510 }
9511 
9512 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9513 {
9514  outInfo.blockCount = 1;
9515 
9516  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9517  outInfo.allocationCount = rangeCount - m_FreeCount;
9518  outInfo.unusedRangeCount = m_FreeCount;
9519 
9520  outInfo.unusedBytes = m_SumFreeSize;
9521  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
9522 
9523  outInfo.allocationSizeMin = UINT64_MAX;
9524  outInfo.allocationSizeMax = 0;
9525  outInfo.unusedRangeSizeMin = UINT64_MAX;
9526  outInfo.unusedRangeSizeMax = 0;
9527 
9528  for(const auto& suballoc : m_Suballocations)
9529  {
9530  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9531  {
9532  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9533  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9534  }
9535  else
9536  {
9537  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
9538  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
9539  }
9540  }
9541 }
9542 
9543 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
9544 {
9545  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9546 
9547  inoutStats.size += GetSize();
9548  inoutStats.unusedSize += m_SumFreeSize;
9549  inoutStats.allocationCount += rangeCount - m_FreeCount;
9550  inoutStats.unusedRangeCount += m_FreeCount;
9551  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9552 }
9553 
9554 #if VMA_STATS_STRING_ENABLED
9555 
9556 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
9557 {
9558  PrintDetailedMap_Begin(json,
9559  m_SumFreeSize, // unusedBytes
9560  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
9561  m_FreeCount); // unusedRangeCount
9562 
9563  for(const auto& suballoc : m_Suballocations)
9564  {
9565  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
9566  {
9567  PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
9568  }
9569  else
9570  {
9571  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
9572  }
9573  }
9574 
9575  PrintDetailedMap_End(json);
9576 }
9577 
9578 #endif // #if VMA_STATS_STRING_ENABLED
9579 
9580 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9581  uint32_t currentFrameIndex,
9582  uint32_t frameInUseCount,
9583  VkDeviceSize bufferImageGranularity,
9584  VkDeviceSize allocSize,
9585  VkDeviceSize allocAlignment,
9586  bool upperAddress,
9587  VmaSuballocationType allocType,
9588  bool canMakeOtherLost,
9589  uint32_t strategy,
9590  VmaAllocationRequest* pAllocationRequest)
9591 {
9592  VMA_ASSERT(allocSize > 0);
9593  VMA_ASSERT(!upperAddress);
9594  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9595  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9596  VMA_HEAVY_ASSERT(Validate());
9597 
9598  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9599 
9600  // There is not enough total free space in this block to fulfill the request: Early return.
9601  if(canMakeOtherLost == false &&
9602  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9603  {
9604  return false;
9605  }
9606 
9607  // Fast path: search m_FreeSuballocationsBySize, which is kept sorted by size ascending.
9608  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9609  if(freeSuballocCount > 0)
9610  {
9611  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
9612  {
9613  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9614  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9615  m_FreeSuballocationsBySize.data(),
9616  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9617  allocSize + 2 * VMA_DEBUG_MARGIN,
9618  VmaSuballocationItemSizeLess());
9619  size_t index = it - m_FreeSuballocationsBySize.data();
9620  for(; index < freeSuballocCount; ++index)
9621  {
9622  if(CheckAllocation(
9623  currentFrameIndex,
9624  frameInUseCount,
9625  bufferImageGranularity,
9626  allocSize,
9627  allocAlignment,
9628  allocType,
9629  m_FreeSuballocationsBySize[index],
9630  false, // canMakeOtherLost
9631  &pAllocationRequest->offset,
9632  &pAllocationRequest->itemsToMakeLostCount,
9633  &pAllocationRequest->sumFreeSize,
9634  &pAllocationRequest->sumItemSize))
9635  {
9636  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9637  return true;
9638  }
9639  }
9640  }
9641  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9642  {
9643  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9644  it != m_Suballocations.end();
9645  ++it)
9646  {
9647  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9648  currentFrameIndex,
9649  frameInUseCount,
9650  bufferImageGranularity,
9651  allocSize,
9652  allocAlignment,
9653  allocType,
9654  it,
9655  false, // canMakeOtherLost
9656  &pAllocationRequest->offset,
9657  &pAllocationRequest->itemsToMakeLostCount,
9658  &pAllocationRequest->sumFreeSize,
9659  &pAllocationRequest->sumItemSize))
9660  {
9661  pAllocationRequest->item = it;
9662  return true;
9663  }
9664  }
9665  }
9666  else // WORST_FIT, FIRST_FIT
9667  {
9668  // Search starting from the biggest suballocations.
9669  for(size_t index = freeSuballocCount; index--; )
9670  {
9671  if(CheckAllocation(
9672  currentFrameIndex,
9673  frameInUseCount,
9674  bufferImageGranularity,
9675  allocSize,
9676  allocAlignment,
9677  allocType,
9678  m_FreeSuballocationsBySize[index],
9679  false, // canMakeOtherLost
9680  &pAllocationRequest->offset,
9681  &pAllocationRequest->itemsToMakeLostCount,
9682  &pAllocationRequest->sumFreeSize,
9683  &pAllocationRequest->sumItemSize))
9684  {
9685  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9686  return true;
9687  }
9688  }
9689  }
9690  }
9691 
9692  if(canMakeOtherLost)
9693  {
9694  // Brute-force algorithm. TODO: Come up with something better.
9695 
9696  bool found = false;
9697  VmaAllocationRequest tmpAllocRequest = {};
9698  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9699  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9700  suballocIt != m_Suballocations.end();
9701  ++suballocIt)
9702  {
9703  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9704  suballocIt->hAllocation->CanBecomeLost())
9705  {
9706  if(CheckAllocation(
9707  currentFrameIndex,
9708  frameInUseCount,
9709  bufferImageGranularity,
9710  allocSize,
9711  allocAlignment,
9712  allocType,
9713  suballocIt,
9714  canMakeOtherLost,
9715  &tmpAllocRequest.offset,
9716  &tmpAllocRequest.itemsToMakeLostCount,
9717  &tmpAllocRequest.sumFreeSize,
9718  &tmpAllocRequest.sumItemSize))
9719  {
9720  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
9721  {
9722  *pAllocationRequest = tmpAllocRequest;
9723  pAllocationRequest->item = suballocIt;
9724  break;
9725  }
9726  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9727  {
9728  *pAllocationRequest = tmpAllocRequest;
9729  pAllocationRequest->item = suballocIt;
9730  found = true;
9731  }
9732  }
9733  }
9734  }
9735 
9736  return found;
9737  }
9738 
9739  return false;
9740 }
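// [Editorial note, not part of the original header] Summary of the search
// paths in CreateAllocationRequest() above:
// - BEST_FIT: binary search in m_FreeSuballocationsBySize for the smallest
//   free range that can hold allocSize plus margins, then validate upward.
// - INTERNAL_STRATEGY_MIN_OFFSET: linear walk of m_Suballocations from the
//   start, taking the lowest offset that fits (used by defragmentation).
// - WORST_FIT / FIRST_FIT: walk m_FreeSuballocationsBySize from the largest
//   range downward.
// - canMakeOtherLost: brute-force pass that also considers evicting "lost"
//   candidates, keeping the request with the lowest CalcCost().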
9741 
9742 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9743  uint32_t currentFrameIndex,
9744  uint32_t frameInUseCount,
9745  VmaAllocationRequest* pAllocationRequest)
9746 {
9747  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9748 
9749  while(pAllocationRequest->itemsToMakeLostCount > 0)
9750  {
9751  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9752  {
9753  ++pAllocationRequest->item;
9754  }
9755  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9756  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9757  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9758  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9759  {
9760  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9761  --pAllocationRequest->itemsToMakeLostCount;
9762  }
9763  else
9764  {
9765  return false;
9766  }
9767  }
9768 
9769  VMA_HEAVY_ASSERT(Validate());
9770  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9771  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9772 
9773  return true;
9774 }
9775 
9776 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9777 {
9778  uint32_t lostAllocationCount = 0;
9779  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9780  it != m_Suballocations.end();
9781  ++it)
9782  {
9783  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9784  it->hAllocation->CanBecomeLost() &&
9785  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9786  {
9787  it = FreeSuballocation(it);
9788  ++lostAllocationCount;
9789  }
9790  }
9791  return lostAllocationCount;
9792 }
9793 
9794 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9795 {
9796  for(auto& suballoc : m_Suballocations)
9797  {
9798  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9799  {
9800  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
9801  {
9802  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9803  return VK_ERROR_VALIDATION_FAILED_EXT;
9804  }
9805  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
9806  {
9807  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9808  return VK_ERROR_VALIDATION_FAILED_EXT;
9809  }
9810  }
9811  }
9812 
9813  return VK_SUCCESS;
9814 }
9815 
9816 void VmaBlockMetadata_Generic::Alloc(
9817  const VmaAllocationRequest& request,
9818  VmaSuballocationType type,
9819  VkDeviceSize allocSize,
9820  VmaAllocation hAllocation)
9821 {
9822  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9823  VMA_ASSERT(request.item != m_Suballocations.end());
9824  VmaSuballocation& suballoc = *request.item;
9825  // Given suballocation is a free block.
9826  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9827  // Given offset is inside this suballocation.
9828  VMA_ASSERT(request.offset >= suballoc.offset);
9829  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9830  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9831  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9832 
9833  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9834  // it to become used.
9835  UnregisterFreeSuballocation(request.item);
9836 
9837  suballoc.offset = request.offset;
9838  suballoc.size = allocSize;
9839  suballoc.type = type;
9840  suballoc.hAllocation = hAllocation;
9841 
9842  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9843  if(paddingEnd)
9844  {
9845  VmaSuballocation paddingSuballoc = {};
9846  paddingSuballoc.offset = request.offset + allocSize;
9847  paddingSuballoc.size = paddingEnd;
9848  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9849  VmaSuballocationList::iterator next = request.item;
9850  ++next;
9851  const VmaSuballocationList::iterator paddingEndItem =
9852  m_Suballocations.insert(next, paddingSuballoc);
9853  RegisterFreeSuballocation(paddingEndItem);
9854  }
9855 
9856  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9857  if(paddingBegin)
9858  {
9859  VmaSuballocation paddingSuballoc = {};
9860  paddingSuballoc.offset = request.offset - paddingBegin;
9861  paddingSuballoc.size = paddingBegin;
9862  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9863  const VmaSuballocationList::iterator paddingBeginItem =
9864  m_Suballocations.insert(request.item, paddingSuballoc);
9865  RegisterFreeSuballocation(paddingBeginItem);
9866  }
9867 
9868  // Update totals.
9869  --m_FreeCount;
9870  if(paddingBegin > 0)
9871  {
9872  ++m_FreeCount;
9873  }
9874  if(paddingEnd > 0)
9875  {
9876  ++m_FreeCount;
9877  }
9878  m_SumFreeSize -= allocSize;
9879 }
9880 
9881 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9882 {
9883  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9884  suballocItem != m_Suballocations.end();
9885  ++suballocItem)
9886  {
9887  VmaSuballocation& suballoc = *suballocItem;
9888  if(suballoc.hAllocation == allocation)
9889  {
9890  FreeSuballocation(suballocItem);
9891  VMA_HEAVY_ASSERT(Validate());
9892  return;
9893  }
9894  }
9895  VMA_ASSERT(0 && "Not found!");
9896 }
9897 
9898 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9899 {
9900  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9901  suballocItem != m_Suballocations.end();
9902  ++suballocItem)
9903  {
9904  VmaSuballocation& suballoc = *suballocItem;
9905  if(suballoc.offset == offset)
9906  {
9907  FreeSuballocation(suballocItem);
9908  return;
9909  }
9910  }
9911  VMA_ASSERT(0 && "Not found!");
9912 }
9913 
9914 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9915 {
9916  VkDeviceSize lastSize = 0;
9917  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9918  {
9919  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9920 
9921  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9922  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9923  VMA_VALIDATE(it->size >= lastSize);
9924  lastSize = it->size;
9925  }
9926  return true;
9927 }
9928 
9929 bool VmaBlockMetadata_Generic::CheckAllocation(
9930  uint32_t currentFrameIndex,
9931  uint32_t frameInUseCount,
9932  VkDeviceSize bufferImageGranularity,
9933  VkDeviceSize allocSize,
9934  VkDeviceSize allocAlignment,
9935  VmaSuballocationType allocType,
9936  VmaSuballocationList::const_iterator suballocItem,
9937  bool canMakeOtherLost,
9938  VkDeviceSize* pOffset,
9939  size_t* itemsToMakeLostCount,
9940  VkDeviceSize* pSumFreeSize,
9941  VkDeviceSize* pSumItemSize) const
9942 {
9943  VMA_ASSERT(allocSize > 0);
9944  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9945  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9946  VMA_ASSERT(pOffset != VMA_NULL);
9947 
9948  *itemsToMakeLostCount = 0;
9949  *pSumFreeSize = 0;
9950  *pSumItemSize = 0;
9951 
9952  if(canMakeOtherLost)
9953  {
9954  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9955  {
9956  *pSumFreeSize = suballocItem->size;
9957  }
9958  else
9959  {
9960  if(suballocItem->hAllocation->CanBecomeLost() &&
9961  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9962  {
9963  ++*itemsToMakeLostCount;
9964  *pSumItemSize = suballocItem->size;
9965  }
9966  else
9967  {
9968  return false;
9969  }
9970  }
9971 
9972  // Remaining size is too small for this request: Early return.
9973  if(GetSize() - suballocItem->offset < allocSize)
9974  {
9975  return false;
9976  }
9977 
9978  // Start from offset equal to beginning of this suballocation.
9979  *pOffset = suballocItem->offset;
9980 
9981  // Apply VMA_DEBUG_MARGIN at the beginning.
9982  if(VMA_DEBUG_MARGIN > 0)
9983  {
9984  *pOffset += VMA_DEBUG_MARGIN;
9985  }
9986 
9987  // Apply alignment.
9988  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9989 
9990  // Check previous suballocations for BufferImageGranularity conflicts.
9991  // Make bigger alignment if necessary.
9992  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
9993  {
9994  bool bufferImageGranularityConflict = false;
9995  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9996  while(prevSuballocItem != m_Suballocations.cbegin())
9997  {
9998  --prevSuballocItem;
9999  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
10000  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
10001  {
10002  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10003  {
10004  bufferImageGranularityConflict = true;
10005  break;
10006  }
10007  }
10008  else
10009  // Already on previous page.
10010  break;
10011  }
10012  if(bufferImageGranularityConflict)
10013  {
10014  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
10015  }
10016  }
10017 
10018  // Now that we have final *pOffset, check if we are past suballocItem.
10019  // If yes, return false - this function should be called for another suballocItem as starting point.
10020  if(*pOffset >= suballocItem->offset + suballocItem->size)
10021  {
10022  return false;
10023  }
10024 
10025  // Calculate padding at the beginning based on current offset.
10026  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
10027 
10028  // Calculate required margin at the end.
10029  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
10030 
10031  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
10032  // Another early return check.
10033  if(suballocItem->offset + totalSize > GetSize())
10034  {
10035  return false;
10036  }
10037 
10038  // Advance lastSuballocItem until desired size is reached.
10039  // Update itemsToMakeLostCount.
10040  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
10041  if(totalSize > suballocItem->size)
10042  {
10043  VkDeviceSize remainingSize = totalSize - suballocItem->size;
10044  while(remainingSize > 0)
10045  {
10046  ++lastSuballocItem;
10047  if(lastSuballocItem == m_Suballocations.cend())
10048  {
10049  return false;
10050  }
10051  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
10052  {
10053  *pSumFreeSize += lastSuballocItem->size;
10054  }
10055  else
10056  {
10057  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
10058  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
10059  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10060  {
10061  ++*itemsToMakeLostCount;
10062  *pSumItemSize += lastSuballocItem->size;
10063  }
10064  else
10065  {
10066  return false;
10067  }
10068  }
10069  remainingSize = (lastSuballocItem->size < remainingSize) ?
10070  remainingSize - lastSuballocItem->size : 0;
10071  }
10072  }
10073 
10074  // Check next suballocations for BufferImageGranularity conflicts.
10075  // If conflict exists, we must mark more allocations lost or fail.
10076  if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
10077  {
10078  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
10079  ++nextSuballocItem;
10080  while(nextSuballocItem != m_Suballocations.cend())
10081  {
10082  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
10083  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10084  {
10085  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10086  {
10087  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
10088  if(nextSuballoc.hAllocation->CanBecomeLost() &&
10089  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10090  {
10091  ++*itemsToMakeLostCount;
10092  }
10093  else
10094  {
10095  return false;
10096  }
10097  }
10098  }
10099  else
10100  {
10101  // Already on next page.
10102  break;
10103  }
10104  ++nextSuballocItem;
10105  }
10106  }
10107  }
10108  else
10109  {
10110  const VmaSuballocation& suballoc = *suballocItem;
10111  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10112 
10113  *pSumFreeSize = suballoc.size;
10114 
10115  // Size of this suballocation is too small for this request: Early return.
10116  if(suballoc.size < allocSize)
10117  {
10118  return false;
10119  }
10120 
10121  // Start from offset equal to beginning of this suballocation.
10122  *pOffset = suballoc.offset;
10123 
10124  // Apply VMA_DEBUG_MARGIN at the beginning.
10125  if(VMA_DEBUG_MARGIN > 0)
10126  {
10127  *pOffset += VMA_DEBUG_MARGIN;
10128  }
10129 
10130  // Apply alignment.
10131  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
10132 
10133  // Check previous suballocations for BufferImageGranularity conflicts.
10134  // Make bigger alignment if necessary.
10135  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
10136  {
10137  bool bufferImageGranularityConflict = false;
10138  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
10139  while(prevSuballocItem != m_Suballocations.cbegin())
10140  {
10141  --prevSuballocItem;
10142  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
10143  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
10144  {
10145  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10146  {
10147  bufferImageGranularityConflict = true;
10148  break;
10149  }
10150  }
10151  else
10152  // Already on previous page.
10153  break;
10154  }
10155  if(bufferImageGranularityConflict)
10156  {
10157  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
10158  }
10159  }
10160 
10161  // Calculate padding at the beginning based on current offset.
10162  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
10163 
10164  // Calculate required margin at the end.
10165  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
10166 
10167  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
10168  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
10169  {
10170  return false;
10171  }
10172 
10173  // Check next suballocations for BufferImageGranularity conflicts.
10174  // If conflict exists, allocation cannot be made here.
10175  if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
10176  {
10177  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
10178  ++nextSuballocItem;
10179  while(nextSuballocItem != m_Suballocations.cend())
10180  {
10181  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
10182  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10183  {
10184  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10185  {
10186  return false;
10187  }
10188  }
10189  else
10190  {
10191  // Already on next page.
10192  break;
10193  }
10194  ++nextSuballocItem;
10195  }
10196  }
10197  }
10198 
10199  // All tests passed: Success. pOffset is already filled.
10200  return true;
10201 }
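// [Editorial sketch, not part of the original header] The offset adjustment
// above relies on VmaAlignUp(), defined earlier in this file. For a
// power-of-two alignment, rounding up is the familiar bit trick:
//
//     VkDeviceSize AlignUpPow2(VkDeviceSize value, VkDeviceSize alignment)
//     {
//         // E.g. AlignUpPow2(13, 8) == 16. Assumes alignment is a power of 2.
//         return (value + alignment - 1) & ~(alignment - 1);
//     }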
10202 
10203 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
10204 {
10205  VMA_ASSERT(item != m_Suballocations.end());
10206  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10207 
10208  VmaSuballocationList::iterator nextItem = item;
10209  ++nextItem;
10210  VMA_ASSERT(nextItem != m_Suballocations.end());
10211  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
10212 
10213  item->size += nextItem->size;
10214  --m_FreeCount;
10215  m_Suballocations.erase(nextItem);
10216 }
10217 
10218 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
10219 {
10220  // Change this suballocation to be marked as free.
10221  VmaSuballocation& suballoc = *suballocItem;
10222  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10223  suballoc.hAllocation = VK_NULL_HANDLE;
10224 
10225  // Update totals.
10226  ++m_FreeCount;
10227  m_SumFreeSize += suballoc.size;
10228 
10229  // Merge with previous and/or next suballocation if it's also free.
10230  bool mergeWithNext = false;
10231  bool mergeWithPrev = false;
10232 
10233  VmaSuballocationList::iterator nextItem = suballocItem;
10234  ++nextItem;
10235  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
10236  {
10237  mergeWithNext = true;
10238  }
10239 
10240  VmaSuballocationList::iterator prevItem = suballocItem;
10241  if(suballocItem != m_Suballocations.begin())
10242  {
10243  --prevItem;
10244  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
10245  {
10246  mergeWithPrev = true;
10247  }
10248  }
10249 
10250  if(mergeWithNext)
10251  {
10252  UnregisterFreeSuballocation(nextItem);
10253  MergeFreeWithNext(suballocItem);
10254  }
10255 
10256  if(mergeWithPrev)
10257  {
10258  UnregisterFreeSuballocation(prevItem);
10259  MergeFreeWithNext(prevItem);
10260  RegisterFreeSuballocation(prevItem);
10261  return prevItem;
10262  }
10263  else
10264  {
10265  RegisterFreeSuballocation(suballocItem);
10266  return suballocItem;
10267  }
10268 }
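// [Editorial note, not part of the original header] Coalescing in
// FreeSuballocation(), pictured:
//
//     before: | used | FREE  |  freed  | FREE  | used |
//                      ^prev             ^next
//     after:  | used |      one FREE range      | used |
//
// Both free neighbors are unregistered from m_FreeSuballocationsBySize before
// merging, and the single surviving range is registered exactly once, so the
// size-sorted index never holds a dangling iterator.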
10269 
10270 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
10271 {
10272  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10273  VMA_ASSERT(item->size > 0);
10274 
10275  // You may want to enable this validation at the beginning or at the end of
10276  // this function, depending on what you want to check.
10277  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10278 
10279  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
10280  {
10281  if(m_FreeSuballocationsBySize.empty())
10282  {
10283  m_FreeSuballocationsBySize.push_back(item);
10284  }
10285  else
10286  {
10287  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
10288  }
10289  }
10290 
10291  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10292 }
10293 
10294 
10295 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
10296 {
10297  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10298  VMA_ASSERT(item->size > 0);
10299 
10300  // You may want to enable this validation at the beginning or at the end of
10301  // this function, depending on what you want to check.
10302  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10303 
10304  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
10305  {
10306  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
10307  m_FreeSuballocationsBySize.data(),
10308  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
10309  item,
10310  VmaSuballocationItemSizeLess());
10311  for(size_t index = it - m_FreeSuballocationsBySize.data();
10312  index < m_FreeSuballocationsBySize.size();
10313  ++index)
10314  {
10315  if(m_FreeSuballocationsBySize[index] == item)
10316  {
10317  VmaVectorRemove(m_FreeSuballocationsBySize, index);
10318  return;
10319  }
10320  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
10321  }
10322  VMA_ASSERT(0 && "Not found.");
10323  }
10324 
10325  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10326 }
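// [Editorial note, not part of the original header] On the lookup above:
// VmaBinaryFindFirstNotLess() lands on the first entry of the run of
// equal-sized free ranges, and the loop then scans forward comparing
// iterators until it hits the exact item. The assert inside the loop guards
// that the scan never walks past that equal-size run.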
10327 
10328 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
10329  VkDeviceSize bufferImageGranularity,
10330  VmaSuballocationType& inOutPrevSuballocType) const
10331 {
10332  if(bufferImageGranularity == 1 || IsEmpty())
10333  {
10334  return false;
10335  }
10336 
10337  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
10338  bool typeConflictFound = false;
10339  for(const auto& suballoc : m_Suballocations)
10340  {
10341  const VmaSuballocationType suballocType = suballoc.type;
10342  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
10343  {
10344  minAlignment = VMA_MIN(minAlignment, suballoc.hAllocation->GetAlignment());
10345  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
10346  {
10347  typeConflictFound = true;
10348  }
10349  inOutPrevSuballocType = suballocType;
10350  }
10351  }
10352 
10353  return typeConflictFound || minAlignment >= bufferImageGranularity;
10354 }
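// [Editorial sketch, not part of the original header] The page test used
// throughout this file (VmaBlocksOnSamePage(), defined earlier) asks whether
// the end of resource A and the start of resource B can land on the same
// bufferImageGranularity "page". A hedged sketch of the idea, assuming a
// power-of-two page size:
//
//     bool OnSamePage(VkDeviceSize aOffset, VkDeviceSize aSize,
//         VkDeviceSize bOffset, VkDeviceSize pageSize)
//     {
//         const VkDeviceSize aEnd       = aOffset + aSize - 1;
//         const VkDeviceSize aEndPage   = aEnd & ~(pageSize - 1);
//         const VkDeviceSize bStartPage = bOffset & ~(pageSize - 1);
//         return aEndPage == bStartPage;
//     }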
10355 
10356 ////////////////////////////////////////////////////////////////////////////////
10357 // class VmaBlockMetadata_Linear
10358 
10359 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
10360  VmaBlockMetadata(hAllocator),
10361  m_SumFreeSize(0),
10362  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10363  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10364  m_1stVectorIndex(0),
10365  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
10366  m_1stNullItemsBeginCount(0),
10367  m_1stNullItemsMiddleCount(0),
10368  m_2ndNullItemsCount(0)
10369 {
10370 }
10371 
10372 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
10373 {
10374 }
10375 
10376 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
10377 {
10378  VmaBlockMetadata::Init(size);
10379  m_SumFreeSize = size;
10380 }
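// [Editorial note, not part of the original header] Block layouts handled by
// the linear metadata, keyed by the SECOND_VECTOR_* mode used below:
//
//     SECOND_VECTOR_EMPTY:        [ --1st--> ............. ]
//     SECOND_VECTOR_RING_BUFFER:  [ --2nd--> ... --1st--> ]  (2nd restarted at offset 0)
//     SECOND_VECTOR_DOUBLE_STACK: [ --1st--> ... <--2nd-- ]  (2nd grows down from the end)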
10381 
10382 bool VmaBlockMetadata_Linear::Validate() const
10383 {
10384  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10385  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10386 
10387  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
10388  VMA_VALIDATE(!suballocations1st.empty() ||
10389  suballocations2nd.empty() ||
10390  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
10391 
10392  if(!suballocations1st.empty())
10393  {
10394  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
10395  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
10396  // Null item at the end should be just pop_back().
10397  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
10398  }
10399  if(!suballocations2nd.empty())
10400  {
10401  // Null item at the end should be just pop_back().
10402  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
10403  }
10404 
10405  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
10406  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
10407 
10408  VkDeviceSize sumUsedSize = 0;
10409  const size_t suballoc1stCount = suballocations1st.size();
10410  VkDeviceSize offset = VMA_DEBUG_MARGIN;
10411 
10412  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10413  {
10414  const size_t suballoc2ndCount = suballocations2nd.size();
10415  size_t nullItem2ndCount = 0;
10416  for(size_t i = 0; i < suballoc2ndCount; ++i)
10417  {
10418  const VmaSuballocation& suballoc = suballocations2nd[i];
10419  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10420 
10421  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10422  VMA_VALIDATE(suballoc.offset >= offset);
10423 
10424  if(!currFree)
10425  {
10426  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10427  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10428  sumUsedSize += suballoc.size;
10429  }
10430  else
10431  {
10432  ++nullItem2ndCount;
10433  }
10434 
10435  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10436  }
10437 
10438  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10439  }
10440 
10441  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
10442  {
10443  const VmaSuballocation& suballoc = suballocations1st[i];
10444  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
10445  suballoc.hAllocation == VK_NULL_HANDLE);
10446  }
10447 
10448  size_t nullItem1stCount = m_1stNullItemsBeginCount;
10449 
10450  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
10451  {
10452  const VmaSuballocation& suballoc = suballocations1st[i];
10453  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10454 
10455  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10456  VMA_VALIDATE(suballoc.offset >= offset);
10457  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
10458 
10459  if(!currFree)
10460  {
10461  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10462  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10463  sumUsedSize += suballoc.size;
10464  }
10465  else
10466  {
10467  ++nullItem1stCount;
10468  }
10469 
10470  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10471  }
10472  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
10473 
10474  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10475  {
10476  const size_t suballoc2ndCount = suballocations2nd.size();
10477  size_t nullItem2ndCount = 0;
10478  for(size_t i = suballoc2ndCount; i--; )
10479  {
10480  const VmaSuballocation& suballoc = suballocations2nd[i];
10481  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10482 
10483  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10484  VMA_VALIDATE(suballoc.offset >= offset);
10485 
10486  if(!currFree)
10487  {
10488  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10489  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10490  sumUsedSize += suballoc.size;
10491  }
10492  else
10493  {
10494  ++nullItem2ndCount;
10495  }
10496 
10497  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10498  }
10499 
10500  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10501  }
10502 
10503  VMA_VALIDATE(offset <= GetSize());
10504  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
10505 
10506  return true;
10507 }
10508 
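// The invariant Validate() enforces can be shown in isolation. A minimal
// illustrative sketch follows; it is not part of the VMA API, and the names
// ExampleRange and ExampleValidateLinearLayout are hypothetical. It assumes
// suballocations sorted by ascending offset, as in the 1st vector above.
struct ExampleRange { VkDeviceSize offset, size; };

static bool ExampleValidateLinearLayout(
    const ExampleRange* items, size_t count,
    VkDeviceSize blockSize, VkDeviceSize debugMargin)
{
    VkDeviceSize nextAllowedOffset = debugMargin; // First item must leave a margin.
    VkDeviceSize sumUsed = 0;
    for(size_t i = 0; i < count; ++i)
    {
        if(items[i].offset < nextAllowedOffset)
            return false; // Overlap, wrong order, or missing debug margin.
        sumUsed += items[i].size;
        nextAllowedOffset = items[i].offset + items[i].size + debugMargin;
    }
    // Everything must lie inside the block; used bytes cannot exceed its size.
    return nextAllowedOffset - debugMargin <= blockSize && sumUsed <= blockSize;
}
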
10509 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
10510 {
10511  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
10512  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
10513 }
10514 
10515 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
10516 {
10517  const VkDeviceSize size = GetSize();
10518 
10519  /*
10520  We don't consider gaps inside allocation vectors with freed allocations because
10521  they are not suitable for reuse in a linear allocator. We consider only space that
10522  is available for new allocations.
10523  */
10524  if(IsEmpty())
10525  {
10526  return size;
10527  }
10528 
10529  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10530 
10531  switch(m_2ndVectorMode)
10532  {
10533  case SECOND_VECTOR_EMPTY:
10534  /*
10535  Available space is after end of 1st, as well as before beginning of 1st (which
10536  would make it a ring buffer).
10537  */
10538  {
10539  const size_t suballocations1stCount = suballocations1st.size();
10540  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
10541  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10542  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
10543  return VMA_MAX(
10544  firstSuballoc.offset,
10545  size - (lastSuballoc.offset + lastSuballoc.size));
10546  }
10547  break;
10548 
10549  case SECOND_VECTOR_RING_BUFFER:
10550  /*
10551  Available space is only between end of 2nd and beginning of 1st.
10552  */
10553  {
10554  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10555  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
10556  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
10557  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
10558  }
10559  break;
10560 
10561  case SECOND_VECTOR_DOUBLE_STACK:
10562  /*
10563  Available space is only between end of 1st and top of 2nd.
10564  */
10565  {
10566  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10567  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
10568  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
10569  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
10570  }
10571  break;
10572 
10573  default:
10574  VMA_ASSERT(0);
10575  return 0;
10576  }
10577 }
10578 
10579 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10580 {
10581  const VkDeviceSize size = GetSize();
10582  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10583  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10584  const size_t suballoc1stCount = suballocations1st.size();
10585  const size_t suballoc2ndCount = suballocations2nd.size();
10586 
10587  outInfo.blockCount = 1;
10588  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10589  outInfo.unusedRangeCount = 0;
10590  outInfo.usedBytes = 0;
10591  outInfo.allocationSizeMin = UINT64_MAX;
10592  outInfo.allocationSizeMax = 0;
10593  outInfo.unusedRangeSizeMin = UINT64_MAX;
10594  outInfo.unusedRangeSizeMax = 0;
10595 
10596  VkDeviceSize lastOffset = 0;
10597 
10598  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10599  {
10600  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10601  size_t nextAlloc2ndIndex = 0;
10602  while(lastOffset < freeSpace2ndTo1stEnd)
10603  {
10604  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10605  while(nextAlloc2ndIndex < suballoc2ndCount &&
10606  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10607  {
10608  ++nextAlloc2ndIndex;
10609  }
10610 
10611  // Found non-null allocation.
10612  if(nextAlloc2ndIndex < suballoc2ndCount)
10613  {
10614  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10615 
10616  // 1. Process free space before this allocation.
10617  if(lastOffset < suballoc.offset)
10618  {
10619  // There is free space from lastOffset to suballoc.offset.
10620  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10621  ++outInfo.unusedRangeCount;
10622  outInfo.unusedBytes += unusedRangeSize;
10623  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10624  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10625  }
10626 
10627  // 2. Process this allocation.
10628  // There is allocation with suballoc.offset, suballoc.size.
10629  outInfo.usedBytes += suballoc.size;
10630  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10631  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10632 
10633  // 3. Prepare for next iteration.
10634  lastOffset = suballoc.offset + suballoc.size;
10635  ++nextAlloc2ndIndex;
10636  }
10637  // We are at the end.
10638  else
10639  {
10640  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10641  if(lastOffset < freeSpace2ndTo1stEnd)
10642  {
10643  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10644  ++outInfo.unusedRangeCount;
10645  outInfo.unusedBytes += unusedRangeSize;
10646  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10647  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10648  }
10649 
10650  // End of loop.
10651  lastOffset = freeSpace2ndTo1stEnd;
10652  }
10653  }
10654  }
10655 
10656  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10657  const VkDeviceSize freeSpace1stTo2ndEnd =
10658  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10659  while(lastOffset < freeSpace1stTo2ndEnd)
10660  {
10661  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10662  while(nextAlloc1stIndex < suballoc1stCount &&
10663  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10664  {
10665  ++nextAlloc1stIndex;
10666  }
10667 
10668  // Found non-null allocation.
10669  if(nextAlloc1stIndex < suballoc1stCount)
10670  {
10671  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10672 
10673  // 1. Process free space before this allocation.
10674  if(lastOffset < suballoc.offset)
10675  {
10676  // There is free space from lastOffset to suballoc.offset.
10677  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10678  ++outInfo.unusedRangeCount;
10679  outInfo.unusedBytes += unusedRangeSize;
10680  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10681  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10682  }
10683 
10684  // 2. Process this allocation.
10685  // There is allocation with suballoc.offset, suballoc.size.
10686  outInfo.usedBytes += suballoc.size;
10687  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10688  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10689 
10690  // 3. Prepare for next iteration.
10691  lastOffset = suballoc.offset + suballoc.size;
10692  ++nextAlloc1stIndex;
10693  }
10694  // We are at the end.
10695  else
10696  {
10697  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10698  if(lastOffset < freeSpace1stTo2ndEnd)
10699  {
10700  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10701  ++outInfo.unusedRangeCount;
10702  outInfo.unusedBytes += unusedRangeSize;
10703  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10704  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10705  }
10706 
10707  // End of loop.
10708  lastOffset = freeSpace1stTo2ndEnd;
10709  }
10710  }
10711 
10712  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10713  {
10714  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10715  while(lastOffset < size)
10716  {
10717  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
10718  while(nextAlloc2ndIndex != SIZE_MAX &&
10719  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10720  {
10721  --nextAlloc2ndIndex;
10722  }
10723 
10724  // Found non-null allocation.
10725  if(nextAlloc2ndIndex != SIZE_MAX)
10726  {
10727  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10728 
10729  // 1. Process free space before this allocation.
10730  if(lastOffset < suballoc.offset)
10731  {
10732  // There is free space from lastOffset to suballoc.offset.
10733  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10734  ++outInfo.unusedRangeCount;
10735  outInfo.unusedBytes += unusedRangeSize;
10736  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10737  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10738  }
10739 
10740  // 2. Process this allocation.
10741  // There is allocation with suballoc.offset, suballoc.size.
10742  outInfo.usedBytes += suballoc.size;
10743  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10744  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10745 
10746  // 3. Prepare for next iteration.
10747  lastOffset = suballoc.offset + suballoc.size;
10748  --nextAlloc2ndIndex;
10749  }
10750  // We are at the end.
10751  else
10752  {
10753  // There is free space from lastOffset to size.
10754  if(lastOffset < size)
10755  {
10756  const VkDeviceSize unusedRangeSize = size - lastOffset;
10757  ++outInfo.unusedRangeCount;
10758  outInfo.unusedBytes += unusedRangeSize;
10759  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10760  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10761  }
10762 
10763  // End of loop.
10764  lastOffset = size;
10765  }
10766  }
10767  }
10768 
10769  outInfo.unusedBytes = size - outInfo.usedBytes;
10770 }
10771 
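// A note on the accumulation pattern above, reduced to an illustrative
// sketch (the helper name is hypothetical): min fields are seeded with
// UINT64_MAX and only shrink through VMA_MIN, while max fields are seeded
// with 0 and only grow through VMA_MAX, so a block that contributes no
// ranges leaves both seeds untouched.
static void ExampleAccumulateRangeSize(VkDeviceSize rangeSize,
    VkDeviceSize& inoutSizeMin, VkDeviceSize& inoutSizeMax)
{
    inoutSizeMin = VMA_MIN(inoutSizeMin, rangeSize);
    inoutSizeMax = VMA_MAX(inoutSizeMax, rangeSize);
}
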
10772 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10773 {
10774  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10775  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10776  const VkDeviceSize size = GetSize();
10777  const size_t suballoc1stCount = suballocations1st.size();
10778  const size_t suballoc2ndCount = suballocations2nd.size();
10779 
10780  inoutStats.size += size;
10781 
10782  VkDeviceSize lastOffset = 0;
10783 
10784  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10785  {
10786  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10787  size_t nextAlloc2ndIndex = 0;
10788  while(lastOffset < freeSpace2ndTo1stEnd)
10789  {
10790  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10791  while(nextAlloc2ndIndex < suballoc2ndCount &&
10792  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10793  {
10794  ++nextAlloc2ndIndex;
10795  }
10796 
10797  // Found non-null allocation.
10798  if(nextAlloc2ndIndex < suballoc2ndCount)
10799  {
10800  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10801 
10802  // 1. Process free space before this allocation.
10803  if(lastOffset < suballoc.offset)
10804  {
10805  // There is free space from lastOffset to suballoc.offset.
10806  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10807  inoutStats.unusedSize += unusedRangeSize;
10808  ++inoutStats.unusedRangeCount;
10809  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10810  }
10811 
10812  // 2. Process this allocation.
10813  // There is allocation with suballoc.offset, suballoc.size.
10814  ++inoutStats.allocationCount;
10815 
10816  // 3. Prepare for next iteration.
10817  lastOffset = suballoc.offset + suballoc.size;
10818  ++nextAlloc2ndIndex;
10819  }
10820  // We are at the end.
10821  else
10822  {
10823  if(lastOffset < freeSpace2ndTo1stEnd)
10824  {
10825  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10826  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10827  inoutStats.unusedSize += unusedRangeSize;
10828  ++inoutStats.unusedRangeCount;
10829  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10830  }
10831 
10832  // End of loop.
10833  lastOffset = freeSpace2ndTo1stEnd;
10834  }
10835  }
10836  }
10837 
10838  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10839  const VkDeviceSize freeSpace1stTo2ndEnd =
10840  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10841  while(lastOffset < freeSpace1stTo2ndEnd)
10842  {
10843  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10844  while(nextAlloc1stIndex < suballoc1stCount &&
10845  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10846  {
10847  ++nextAlloc1stIndex;
10848  }
10849 
10850  // Found non-null allocation.
10851  if(nextAlloc1stIndex < suballoc1stCount)
10852  {
10853  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10854 
10855  // 1. Process free space before this allocation.
10856  if(lastOffset < suballoc.offset)
10857  {
10858  // There is free space from lastOffset to suballoc.offset.
10859  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10860  inoutStats.unusedSize += unusedRangeSize;
10861  ++inoutStats.unusedRangeCount;
10862  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10863  }
10864 
10865  // 2. Process this allocation.
10866  // There is allocation with suballoc.offset, suballoc.size.
10867  ++inoutStats.allocationCount;
10868 
10869  // 3. Prepare for next iteration.
10870  lastOffset = suballoc.offset + suballoc.size;
10871  ++nextAlloc1stIndex;
10872  }
10873  // We are at the end.
10874  else
10875  {
10876  if(lastOffset < freeSpace1stTo2ndEnd)
10877  {
10878  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10879  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10880  inoutStats.unusedSize += unusedRangeSize;
10881  ++inoutStats.unusedRangeCount;
10882  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10883  }
10884 
10885  // End of loop.
10886  lastOffset = freeSpace1stTo2ndEnd;
10887  }
10888  }
10889 
10890  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10891  {
10892  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10893  while(lastOffset < size)
10894  {
10895  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
10896  while(nextAlloc2ndIndex != SIZE_MAX &&
10897  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10898  {
10899  --nextAlloc2ndIndex;
10900  }
10901 
10902  // Found non-null allocation.
10903  if(nextAlloc2ndIndex != SIZE_MAX)
10904  {
10905  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10906 
10907  // 1. Process free space before this allocation.
10908  if(lastOffset < suballoc.offset)
10909  {
10910  // There is free space from lastOffset to suballoc.offset.
10911  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10912  inoutStats.unusedSize += unusedRangeSize;
10913  ++inoutStats.unusedRangeCount;
10914  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10915  }
10916 
10917  // 2. Process this allocation.
10918  // There is allocation with suballoc.offset, suballoc.size.
10919  ++inoutStats.allocationCount;
10920 
10921  // 3. Prepare for next iteration.
10922  lastOffset = suballoc.offset + suballoc.size;
10923  --nextAlloc2ndIndex;
10924  }
10925  // We are at the end.
10926  else
10927  {
10928  if(lastOffset < size)
10929  {
10930  // There is free space from lastOffset to size.
10931  const VkDeviceSize unusedRangeSize = size - lastOffset;
10932  inoutStats.unusedSize += unusedRangeSize;
10933  ++inoutStats.unusedRangeCount;
10934  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10935  }
10936 
10937  // End of loop.
10938  lastOffset = size;
10939  }
10940  }
10941  }
10942 }
10943 
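// Usage sketch for AddPoolStats(), kept as a comment because blockCount,
// blocks, and the member access shown are hypothetical surrounding code, not
// defined here: the function is a pure accumulator, so per-pool statistics
// are built by zero-initializing one VmaPoolStats and folding in every block
// of the pool.
//
//     VmaPoolStats stats = {}; // All counters start at zero.
//     for(size_t i = 0; i < blockCount; ++i)
//         blocks[i]->m_pMetadata->AddPoolStats(stats);
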
10944 #if VMA_STATS_STRING_ENABLED
10945 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10946 {
10947  const VkDeviceSize size = GetSize();
10948  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10949  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10950  const size_t suballoc1stCount = suballocations1st.size();
10951  const size_t suballoc2ndCount = suballocations2nd.size();
10952 
10953  // FIRST PASS
10954 
10955  size_t unusedRangeCount = 0;
10956  VkDeviceSize usedBytes = 0;
10957 
10958  VkDeviceSize lastOffset = 0;
10959 
10960  size_t alloc2ndCount = 0;
10961  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10962  {
10963  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10964  size_t nextAlloc2ndIndex = 0;
10965  while(lastOffset < freeSpace2ndTo1stEnd)
10966  {
10967  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10968  while(nextAlloc2ndIndex < suballoc2ndCount &&
10969  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10970  {
10971  ++nextAlloc2ndIndex;
10972  }
10973 
10974  // Found non-null allocation.
10975  if(nextAlloc2ndIndex < suballoc2ndCount)
10976  {
10977  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10978 
10979  // 1. Process free space before this allocation.
10980  if(lastOffset < suballoc.offset)
10981  {
10982  // There is free space from lastOffset to suballoc.offset.
10983  ++unusedRangeCount;
10984  }
10985 
10986  // 2. Process this allocation.
10987  // There is allocation with suballoc.offset, suballoc.size.
10988  ++alloc2ndCount;
10989  usedBytes += suballoc.size;
10990 
10991  // 3. Prepare for next iteration.
10992  lastOffset = suballoc.offset + suballoc.size;
10993  ++nextAlloc2ndIndex;
10994  }
10995  // We are at the end.
10996  else
10997  {
10998  if(lastOffset < freeSpace2ndTo1stEnd)
10999  {
11000  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
11001  ++unusedRangeCount;
11002  }
11003 
11004  // End of loop.
11005  lastOffset = freeSpace2ndTo1stEnd;
11006  }
11007  }
11008  }
11009 
11010  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
11011  size_t alloc1stCount = 0;
11012  const VkDeviceSize freeSpace1stTo2ndEnd =
11013  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
11014  while(lastOffset < freeSpace1stTo2ndEnd)
11015  {
11016  // Find next non-null allocation or move nextAlloc1stIndex to the end.
11017  while(nextAlloc1stIndex < suballoc1stCount &&
11018  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
11019  {
11020  ++nextAlloc1stIndex;
11021  }
11022 
11023  // Found non-null allocation.
11024  if(nextAlloc1stIndex < suballoc1stCount)
11025  {
11026  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
11027 
11028  // 1. Process free space before this allocation.
11029  if(lastOffset < suballoc.offset)
11030  {
11031  // There is free space from lastOffset to suballoc.offset.
11032  ++unusedRangeCount;
11033  }
11034 
11035  // 2. Process this allocation.
11036  // There is allocation with suballoc.offset, suballoc.size.
11037  ++alloc1stCount;
11038  usedBytes += suballoc.size;
11039 
11040  // 3. Prepare for next iteration.
11041  lastOffset = suballoc.offset + suballoc.size;
11042  ++nextAlloc1stIndex;
11043  }
11044  // We are at the end.
11045  else
11046  {
11047  if(lastOffset < freeSpace1stTo2ndEnd)
11048  {
11049  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
11050  ++unusedRangeCount;
11051  }
11052 
11053  // End of loop.
11054  lastOffset = freeSpace1stTo2ndEnd;
11055  }
11056  }
11057 
11058  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11059  {
11060  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
11061  while(lastOffset < size)
11062  {
11063  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
11064  while(nextAlloc2ndIndex != SIZE_MAX &&
11065  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11066  {
11067  --nextAlloc2ndIndex;
11068  }
11069 
11070  // Found non-null allocation.
11071  if(nextAlloc2ndIndex != SIZE_MAX)
11072  {
11073  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11074 
11075  // 1. Process free space before this allocation.
11076  if(lastOffset < suballoc.offset)
11077  {
11078  // There is free space from lastOffset to suballoc.offset.
11079  ++unusedRangeCount;
11080  }
11081 
11082  // 2. Process this allocation.
11083  // There is allocation with suballoc.offset, suballoc.size.
11084  ++alloc2ndCount;
11085  usedBytes += suballoc.size;
11086 
11087  // 3. Prepare for next iteration.
11088  lastOffset = suballoc.offset + suballoc.size;
11089  --nextAlloc2ndIndex;
11090  }
11091  // We are at the end.
11092  else
11093  {
11094  if(lastOffset < size)
11095  {
11096  // There is free space from lastOffset to size.
11097  ++unusedRangeCount;
11098  }
11099 
11100  // End of loop.
11101  lastOffset = size;
11102  }
11103  }
11104  }
11105 
11106  const VkDeviceSize unusedBytes = size - usedBytes;
11107  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
11108 
11109  // SECOND PASS
11110  lastOffset = 0;
11111 
11112  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11113  {
11114  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
11115  size_t nextAlloc2ndIndex = 0;
11116  while(lastOffset < freeSpace2ndTo1stEnd)
11117  {
11118  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11119  while(nextAlloc2ndIndex < suballoc2ndCount &&
11120  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11121  {
11122  ++nextAlloc2ndIndex;
11123  }
11124 
11125  // Found non-null allocation.
11126  if(nextAlloc2ndIndex < suballoc2ndCount)
11127  {
11128  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11129 
11130  // 1. Process free space before this allocation.
11131  if(lastOffset < suballoc.offset)
11132  {
11133  // There is free space from lastOffset to suballoc.offset.
11134  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11135  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11136  }
11137 
11138  // 2. Process this allocation.
11139  // There is allocation with suballoc.offset, suballoc.size.
11140  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11141 
11142  // 3. Prepare for next iteration.
11143  lastOffset = suballoc.offset + suballoc.size;
11144  ++nextAlloc2ndIndex;
11145  }
11146  // We are at the end.
11147  else
11148  {
11149  if(lastOffset < freeSpace2ndTo1stEnd)
11150  {
11151  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
11152  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
11153  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11154  }
11155 
11156  // End of loop.
11157  lastOffset = freeSpace2ndTo1stEnd;
11158  }
11159  }
11160  }
11161 
11162  nextAlloc1stIndex = m_1stNullItemsBeginCount;
11163  while(lastOffset < freeSpace1stTo2ndEnd)
11164  {
11165  // Find next non-null allocation or move nextAlloc1stIndex to the end.
11166  while(nextAlloc1stIndex < suballoc1stCount &&
11167  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
11168  {
11169  ++nextAlloc1stIndex;
11170  }
11171 
11172  // Found non-null allocation.
11173  if(nextAlloc1stIndex < suballoc1stCount)
11174  {
11175  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
11176 
11177  // 1. Process free space before this allocation.
11178  if(lastOffset < suballoc.offset)
11179  {
11180  // There is free space from lastOffset to suballoc.offset.
11181  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11182  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11183  }
11184 
11185  // 2. Process this allocation.
11186  // There is allocation with suballoc.offset, suballoc.size.
11187  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11188 
11189  // 3. Prepare for next iteration.
11190  lastOffset = suballoc.offset + suballoc.size;
11191  ++nextAlloc1stIndex;
11192  }
11193  // We are at the end.
11194  else
11195  {
11196  if(lastOffset < freeSpace1stTo2ndEnd)
11197  {
11198  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
11199  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
11200  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11201  }
11202 
11203  // End of loop.
11204  lastOffset = freeSpace1stTo2ndEnd;
11205  }
11206  }
11207 
11208  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11209  {
11210  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
11211  while(lastOffset < size)
11212  {
11213  // Find next non-null allocation or move nextAlloc2ndIndex past the beginning.
11214  while(nextAlloc2ndIndex != SIZE_MAX &&
11215  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11216  {
11217  --nextAlloc2ndIndex;
11218  }
11219 
11220  // Found non-null allocation.
11221  if(nextAlloc2ndIndex != SIZE_MAX)
11222  {
11223  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11224 
11225  // 1. Process free space before this allocation.
11226  if(lastOffset < suballoc.offset)
11227  {
11228  // There is free space from lastOffset to suballoc.offset.
11229  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11230  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11231  }
11232 
11233  // 2. Process this allocation.
11234  // There is allocation with suballoc.offset, suballoc.size.
11235  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11236 
11237  // 3. Prepare for next iteration.
11238  lastOffset = suballoc.offset + suballoc.size;
11239  --nextAlloc2ndIndex;
11240  }
11241  // We are at the end.
11242  else
11243  {
11244  if(lastOffset < size)
11245  {
11246  // There is free space from lastOffset to size.
11247  const VkDeviceSize unusedRangeSize = size - lastOffset;
11248  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11249  }
11250 
11251  // End of loop.
11252  lastOffset = size;
11253  }
11254  }
11255  }
11256 
11257  PrintDetailedMap_End(json);
11258 }
11259 #endif // #if VMA_STATS_STRING_ENABLED
11260 
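// Both passes of PrintDetailedMap() share one traversal shape: pass 1 only
// counts, so PrintDetailedMap_Begin() can emit exact totals before pass 2
// replays the same walk and prints each range. An illustrative reduction of
// the counting pass, reusing the hypothetical ExampleRange type from the
// Validate() sketch above:
static size_t ExampleCountRanges(const ExampleRange* items, size_t count,
    VkDeviceSize blockSize)
{
    size_t rangeCount = 0;
    VkDeviceSize lastOffset = 0;
    for(size_t i = 0; i < count; ++i)
    {
        if(lastOffset < items[i].offset)
            ++rangeCount; // Unused gap before this allocation.
        ++rangeCount; // The allocation itself.
        lastOffset = items[i].offset + items[i].size;
    }
    if(lastOffset < blockSize)
        ++rangeCount; // Trailing unused space.
    return rangeCount;
}
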
11261 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
11262  uint32_t currentFrameIndex,
11263  uint32_t frameInUseCount,
11264  VkDeviceSize bufferImageGranularity,
11265  VkDeviceSize allocSize,
11266  VkDeviceSize allocAlignment,
11267  bool upperAddress,
11268  VmaSuballocationType allocType,
11269  bool canMakeOtherLost,
11270  uint32_t strategy,
11271  VmaAllocationRequest* pAllocationRequest)
11272 {
11273  VMA_ASSERT(allocSize > 0);
11274  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
11275  VMA_ASSERT(pAllocationRequest != VMA_NULL);
11276  VMA_HEAVY_ASSERT(Validate());
11277  return upperAddress ?
11278  CreateAllocationRequest_UpperAddress(
11279  currentFrameIndex, frameInUseCount, bufferImageGranularity,
11280  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
11281  CreateAllocationRequest_LowerAddress(
11282  currentFrameIndex, frameInUseCount, bufferImageGranularity,
11283  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
11284 }
11285 
11286 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
11287  uint32_t currentFrameIndex,
11288  uint32_t frameInUseCount,
11289  VkDeviceSize bufferImageGranularity,
11290  VkDeviceSize allocSize,
11291  VkDeviceSize allocAlignment,
11292  VmaSuballocationType allocType,
11293  bool canMakeOtherLost,
11294  uint32_t strategy,
11295  VmaAllocationRequest* pAllocationRequest)
11296 {
11297  const VkDeviceSize size = GetSize();
11298  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11299  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11300 
11301  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11302  {
11303  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
11304  return false;
11305  }
11306 
11307  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
11308  if(allocSize > size)
11309  {
11310  return false;
11311  }
11312  VkDeviceSize resultBaseOffset = size - allocSize;
11313  if(!suballocations2nd.empty())
11314  {
11315  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11316  resultBaseOffset = lastSuballoc.offset - allocSize;
11317  if(allocSize > lastSuballoc.offset)
11318  {
11319  return false;
11320  }
11321  }
11322 
11323  // Start from offset equal to end of free space.
11324  VkDeviceSize resultOffset = resultBaseOffset;
11325 
11326  // Apply VMA_DEBUG_MARGIN at the end.
11327  if(VMA_DEBUG_MARGIN > 0)
11328  {
11329  if(resultOffset < VMA_DEBUG_MARGIN)
11330  {
11331  return false;
11332  }
11333  resultOffset -= VMA_DEBUG_MARGIN;
11334  }
11335 
11336  // Apply alignment.
11337  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
11338 
11339  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
11340  // Make bigger alignment if necessary.
11341  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
11342  {
11343  bool bufferImageGranularityConflict = false;
11344  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11345  {
11346  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11347  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11348  {
11349  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
11350  {
11351  bufferImageGranularityConflict = true;
11352  break;
11353  }
11354  }
11355  else
11356  // Already on previous page.
11357  break;
11358  }
11359  if(bufferImageGranularityConflict)
11360  {
11361  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
11362  }
11363  }
11364 
11365  // There is enough free space.
11366  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
11367  suballocations1st.back().offset + suballocations1st.back().size :
11368  0;
11369  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
11370  {
11371  // Check previous suballocations for BufferImageGranularity conflicts.
11372  // If conflict exists, allocation cannot be made here.
11373  if(bufferImageGranularity > 1)
11374  {
11375  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11376  {
11377  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11378  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11379  {
11380  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
11381  {
11382  return false;
11383  }
11384  }
11385  else
11386  {
11387  // Already on next page.
11388  break;
11389  }
11390  }
11391  }
11392 
11393  // All tests passed: Success.
11394  pAllocationRequest->offset = resultOffset;
11395  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
11396  pAllocationRequest->sumItemSize = 0;
11397  // pAllocationRequest->item unused.
11398  pAllocationRequest->itemsToMakeLostCount = 0;
11399  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
11400  return true;
11401  }
11402 
11403  return false;
11404 }
11405 
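// The top-down placement math above, isolated as an illustrative sketch
// (hypothetical helper; assumes alignment > 0): an upper-stack allocation
// starts below the previous top allocation, reserves the debug margin, and
// aligns the offset *down*, because toward address 0 is the direction that
// keeps it inside the free region.
static bool ExamplePlaceFromTop(VkDeviceSize freeSpaceTop, VkDeviceSize freeSpaceBottom,
    VkDeviceSize allocSize, VkDeviceSize alignment, VkDeviceSize debugMargin,
    VkDeviceSize* pOutOffset)
{
    if(allocSize + debugMargin > freeSpaceTop)
    {
        return false; // Cannot even fit below the current top.
    }
    VkDeviceSize offset = freeSpaceTop - allocSize - debugMargin;
    offset = VmaAlignDown(offset, alignment);
    if(offset < freeSpaceBottom + debugMargin)
    {
        return false; // Would collide with the used area below.
    }
    *pOutOffset = offset;
    return true;
}
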
11406 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
11407  uint32_t currentFrameIndex,
11408  uint32_t frameInUseCount,
11409  VkDeviceSize bufferImageGranularity,
11410  VkDeviceSize allocSize,
11411  VkDeviceSize allocAlignment,
11412  VmaSuballocationType allocType,
11413  bool canMakeOtherLost,
11414  uint32_t strategy,
11415  VmaAllocationRequest* pAllocationRequest)
11416 {
11417  const VkDeviceSize size = GetSize();
11418  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11419  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11420 
11421  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11422  {
11423  // Try to allocate at the end of 1st vector.
11424 
11425  VkDeviceSize resultBaseOffset = 0;
11426  if(!suballocations1st.empty())
11427  {
11428  const VmaSuballocation& lastSuballoc = suballocations1st.back();
11429  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11430  }
11431 
11432  // Start from offset equal to beginning of free space.
11433  VkDeviceSize resultOffset = resultBaseOffset;
11434 
11435  // Apply VMA_DEBUG_MARGIN at the beginning.
11436  if(VMA_DEBUG_MARGIN > 0)
11437  {
11438  resultOffset += VMA_DEBUG_MARGIN;
11439  }
11440 
11441  // Apply alignment.
11442  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11443 
11444  // Check previous suballocations for BufferImageGranularity conflicts.
11445  // Make bigger alignment if necessary.
11446  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
11447  {
11448  bool bufferImageGranularityConflict = false;
11449  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11450  {
11451  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11452  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11453  {
11454  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11455  {
11456  bufferImageGranularityConflict = true;
11457  break;
11458  }
11459  }
11460  else
11461  // Already on previous page.
11462  break;
11463  }
11464  if(bufferImageGranularityConflict)
11465  {
11466  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11467  }
11468  }
11469 
11470  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
11471  suballocations2nd.back().offset : size;
11472 
11473  // There is enough free space at the end after alignment.
11474  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
11475  {
11476  // Check next suballocations for BufferImageGranularity conflicts.
11477  // If conflict exists, allocation cannot be made here.
11478  if((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11479  {
11480  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11481  {
11482  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11483  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11484  {
11485  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11486  {
11487  return false;
11488  }
11489  }
11490  else
11491  {
11492  // Already on previous page.
11493  break;
11494  }
11495  }
11496  }
11497 
11498  // All tests passed: Success.
11499  pAllocationRequest->offset = resultOffset;
11500  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
11501  pAllocationRequest->sumItemSize = 0;
11502  // pAllocationRequest->item, customData unused.
11503  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
11504  pAllocationRequest->itemsToMakeLostCount = 0;
11505  return true;
11506  }
11507  }
11508 
11509  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
11510  // beginning of 1st vector as the end of free space.
11511  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11512  {
11513  VMA_ASSERT(!suballocations1st.empty());
11514 
11515  VkDeviceSize resultBaseOffset = 0;
11516  if(!suballocations2nd.empty())
11517  {
11518  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11519  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11520  }
11521 
11522  // Start from offset equal to beginning of free space.
11523  VkDeviceSize resultOffset = resultBaseOffset;
11524 
11525  // Apply VMA_DEBUG_MARGIN at the beginning.
11526  if(VMA_DEBUG_MARGIN > 0)
11527  {
11528  resultOffset += VMA_DEBUG_MARGIN;
11529  }
11530 
11531  // Apply alignment.
11532  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11533 
11534  // Check previous suballocations for BufferImageGranularity conflicts.
11535  // Make bigger alignment if necessary.
11536  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
11537  {
11538  bool bufferImageGranularityConflict = false;
11539  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
11540  {
11541  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
11542  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11543  {
11544  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11545  {
11546  bufferImageGranularityConflict = true;
11547  break;
11548  }
11549  }
11550  else
11551  // Already on previous page.
11552  break;
11553  }
11554  if(bufferImageGranularityConflict)
11555  {
11556  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11557  }
11558  }
11559 
11560  pAllocationRequest->itemsToMakeLostCount = 0;
11561  pAllocationRequest->sumItemSize = 0;
11562  size_t index1st = m_1stNullItemsBeginCount;
11563 
11564  if(canMakeOtherLost)
11565  {
11566  while(index1st < suballocations1st.size() &&
11567  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
11568  {
11569  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
11570  const VmaSuballocation& suballoc = suballocations1st[index1st];
11571  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11572  {
11573  // No problem.
11574  }
11575  else
11576  {
11577  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11578  if(suballoc.hAllocation->CanBecomeLost() &&
11579  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11580  {
11581  ++pAllocationRequest->itemsToMakeLostCount;
11582  pAllocationRequest->sumItemSize += suballoc.size;
11583  }
11584  else
11585  {
11586  return false;
11587  }
11588  }
11589  ++index1st;
11590  }
11591 
11592  // Check next suballocations for BufferImageGranularity conflicts.
11593  // If conflict exists, we must mark more allocations lost or fail.
11594  if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
11595  {
11596  while(index1st < suballocations1st.size())
11597  {
11598  const VmaSuballocation& suballoc = suballocations1st[index1st];
11599  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11600  {
11601  if(suballoc.hAllocation != VK_NULL_HANDLE)
11602  {
11603  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11604  if(suballoc.hAllocation->CanBecomeLost() &&
11605  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11606  {
11607  ++pAllocationRequest->itemsToMakeLostCount;
11608  pAllocationRequest->sumItemSize += suballoc.size;
11609  }
11610  else
11611  {
11612  return false;
11613  }
11614  }
11615  }
11616  else
11617  {
11618  // Already on next page.
11619  break;
11620  }
11621  ++index1st;
11622  }
11623  }
11624 
11625  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
11626  if(index1st == suballocations1st.size() &&
11627  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11628  {
11629  // TODO: Known bug: this case is not yet implemented, so the allocation fails.
11630  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11631  }
11632  }
11633 
11634  // There is enough free space at the end after alignment.
11635  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11636  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11637  {
11638  // Check next suballocations for BufferImageGranularity conflicts.
11639  // If conflict exists, allocation cannot be made here.
11640  if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
11641  {
11642  for(size_t nextSuballocIndex = index1st;
11643  nextSuballocIndex < suballocations1st.size();
11644  nextSuballocIndex++)
11645  {
11646  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11647  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11648  {
11649  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11650  {
11651  return false;
11652  }
11653  }
11654  else
11655  {
11656  // Already on next page.
11657  break;
11658  }
11659  }
11660  }
11661 
11662  // All tests passed: Success.
11663  pAllocationRequest->offset = resultOffset;
11664  pAllocationRequest->sumFreeSize =
11665  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11666  - resultBaseOffset
11667  - pAllocationRequest->sumItemSize;
11668  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11669  // pAllocationRequest->item, customData unused.
11670  return true;
11671  }
11672  }
11673 
11674  return false;
11675 }
11676 
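// The bottom-up counterpart, as an illustrative sketch (hypothetical helper;
// assumes alignment > 0): allocations in the 1st vector grow from low
// addresses, so the margin is added *before* the new allocation and the
// offset is aligned *up*.
static bool ExamplePlaceFromBottom(VkDeviceSize freeSpaceBottom, VkDeviceSize freeSpaceTop,
    VkDeviceSize allocSize, VkDeviceSize alignment, VkDeviceSize debugMargin,
    VkDeviceSize* pOutOffset)
{
    const VkDeviceSize offset = VmaAlignUp(freeSpaceBottom + debugMargin, alignment);
    if(offset + allocSize + debugMargin > freeSpaceTop)
    {
        return false; // Not enough room before the next used region.
    }
    *pOutOffset = offset;
    return true;
}
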
11677 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11678  uint32_t currentFrameIndex,
11679  uint32_t frameInUseCount,
11680  VmaAllocationRequest* pAllocationRequest)
11681 {
11682  if(pAllocationRequest->itemsToMakeLostCount == 0)
11683  {
11684  return true;
11685  }
11686 
11687  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11688 
11689  // We always start from 1st.
11690  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11691  size_t index = m_1stNullItemsBeginCount;
11692  size_t madeLostCount = 0;
11693  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11694  {
11695  if(index == suballocations->size())
11696  {
11697  index = 0;
11698  // If we get to the end of 1st, we wrap around to the beginning of 2nd (in ring-buffer mode) or back to the beginning of 1st.
11699  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11700  {
11701  suballocations = &AccessSuballocations2nd();
11702  }
11703  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11704  // suballocations continues pointing at AccessSuballocations1st().
11705  VMA_ASSERT(!suballocations->empty());
11706  }
11707  VmaSuballocation& suballoc = (*suballocations)[index];
11708  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11709  {
11710  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11711  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11712  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11713  {
11714  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11715  suballoc.hAllocation = VK_NULL_HANDLE;
11716  m_SumFreeSize += suballoc.size;
11717  if(suballocations == &AccessSuballocations1st())
11718  {
11719  ++m_1stNullItemsMiddleCount;
11720  }
11721  else
11722  {
11723  ++m_2ndNullItemsCount;
11724  }
11725  ++madeLostCount;
11726  }
11727  else
11728  {
11729  return false;
11730  }
11731  }
11732  ++index;
11733  }
11734 
11735  CleanupAfterFree();
11736  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11737 
11738  return true;
11739 }
11740 
11741 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11742 {
11743  uint32_t lostAllocationCount = 0;
11744 
11745  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11746  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11747  {
11748  VmaSuballocation& suballoc = suballocations1st[i];
11749  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11750  suballoc.hAllocation->CanBecomeLost() &&
11751  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11752  {
11753  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11754  suballoc.hAllocation = VK_NULL_HANDLE;
11755  ++m_1stNullItemsMiddleCount;
11756  m_SumFreeSize += suballoc.size;
11757  ++lostAllocationCount;
11758  }
11759  }
11760 
11761  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11762  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11763  {
11764  VmaSuballocation& suballoc = suballocations2nd[i];
11765  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11766  suballoc.hAllocation->CanBecomeLost() &&
11767  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11768  {
11769  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11770  suballoc.hAllocation = VK_NULL_HANDLE;
11771  ++m_2ndNullItemsCount;
11772  m_SumFreeSize += suballoc.size;
11773  ++lostAllocationCount;
11774  }
11775  }
11776 
11777  if(lostAllocationCount)
11778  {
11779  CleanupAfterFree();
11780  }
11781 
11782  return lostAllocationCount;
11783 }
11784 
11785 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11786 {
11787  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11788  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11789  {
11790  const VmaSuballocation& suballoc = suballocations1st[i];
11791  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11792  {
11793  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11794  {
11795  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11796  return VK_ERROR_VALIDATION_FAILED_EXT;
11797  }
11798  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11799  {
11800  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11801  return VK_ERROR_VALIDATION_FAILED_EXT;
11802  }
11803  }
11804  }
11805 
11806  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11807  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11808  {
11809  const VmaSuballocation& suballoc = suballocations2nd[i];
11810  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11811  {
11812  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11813  {
11814  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11815  return VK_ERROR_VALIDATION_FAILED_EXT;
11816  }
11817  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11818  {
11819  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11820  return VK_ERROR_VALIDATION_FAILED_EXT;
11821  }
11822  }
11823  }
11824 
11825  return VK_SUCCESS;
11826 }
11827 
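// Sketch of the guard scheme that CheckCorruption() verifies (illustrative
// only; the helper name is hypothetical): with corruption detection enabled,
// the VMA_DEBUG_MARGIN region on both sides of every allocation is filled
// with a known 32-bit magic value, and VmaValidateMagicValue() later reports
// any word a neighboring write has clobbered.
static void ExampleWriteGuardPattern(void* pBlockData, VkDeviceSize guardOffset)
{
    uint32_t* pGuard = (uint32_t*)((char*)pBlockData + guardOffset);
    for(size_t i = 0; i < VMA_DEBUG_MARGIN / sizeof(uint32_t); ++i)
    {
        pGuard[i] = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; // Checked by CheckCorruption().
    }
}
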
11828 void VmaBlockMetadata_Linear::Alloc(
11829  const VmaAllocationRequest& request,
11830  VmaSuballocationType type,
11831  VkDeviceSize allocSize,
11832  VmaAllocation hAllocation)
11833 {
11834  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11835 
11836  switch(request.type)
11837  {
11838  case VmaAllocationRequestType::UpperAddress:
11839  {
11840  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11841  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11842  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11843  suballocations2nd.push_back(newSuballoc);
11844  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11845  }
11846  break;
11847  case VmaAllocationRequestType::EndOf1st:
11848  {
11849  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11850 
11851  VMA_ASSERT(suballocations1st.empty() ||
11852  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11853  // Check if it fits before the end of the block.
11854  VMA_ASSERT(request.offset + allocSize <= GetSize());
11855 
11856  suballocations1st.push_back(newSuballoc);
11857  }
11858  break;
11859  case VmaAllocationRequestType::EndOf2nd:
11860  {
11861  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11862  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11863  VMA_ASSERT(!suballocations1st.empty() &&
11864  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11865  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11866 
11867  switch(m_2ndVectorMode)
11868  {
11869  case SECOND_VECTOR_EMPTY:
11870  // First allocation from second part ring buffer.
11871  VMA_ASSERT(suballocations2nd.empty());
11872  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11873  break;
11874  case SECOND_VECTOR_RING_BUFFER:
11875  // 2-part ring buffer is already started.
11876  VMA_ASSERT(!suballocations2nd.empty());
11877  break;
11878  case SECOND_VECTOR_DOUBLE_STACK:
11879  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11880  break;
11881  default:
11882  VMA_ASSERT(0);
11883  }
11884 
11885  suballocations2nd.push_back(newSuballoc);
11886  }
11887  break;
11888  default:
11889  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11890  }
11891 
11892  m_SumFreeSize -= newSuballoc.size;
11893 }
11894 
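// The mode transitions performed by Alloc() above form a small state machine:
// m_2ndVectorMode starts as SECOND_VECTOR_EMPTY and moves to RING_BUFFER on
// the first EndOf2nd allocation, or to DOUBLE_STACK on the first UpperAddress
// allocation. The two non-empty modes are mutually exclusive until
// CleanupAfterFree() resets the mode to EMPTY, which the asserts above enforce.
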
11895 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11896 {
11897  FreeAtOffset(allocation->GetOffset());
11898 }
11899 
11900 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11901 {
11902  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11903  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11904 
11905  if(!suballocations1st.empty())
11906  {
11907  // First allocation: Mark it as next empty at the beginning.
11908  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11909  if(firstSuballoc.offset == offset)
11910  {
11911  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11912  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11913  m_SumFreeSize += firstSuballoc.size;
11914  ++m_1stNullItemsBeginCount;
11915  CleanupAfterFree();
11916  return;
11917  }
11918  }
11919 
11920  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11921  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11922  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11923  {
11924  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11925  if(lastSuballoc.offset == offset)
11926  {
11927  m_SumFreeSize += lastSuballoc.size;
11928  suballocations2nd.pop_back();
11929  CleanupAfterFree();
11930  return;
11931  }
11932  }
11933  // Last allocation in 1st vector.
11934  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11935  {
11936  VmaSuballocation& lastSuballoc = suballocations1st.back();
11937  if(lastSuballoc.offset == offset)
11938  {
11939  m_SumFreeSize += lastSuballoc.size;
11940  suballocations1st.pop_back();
11941  CleanupAfterFree();
11942  return;
11943  }
11944  }
11945 
11946  // Item from the middle of 1st vector.
11947  {
11948  VmaSuballocation refSuballoc;
11949  refSuballoc.offset = offset;
11950  // The rest of the members intentionally stays uninitialized for better performance.
11951  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11952  suballocations1st.begin() + m_1stNullItemsBeginCount,
11953  suballocations1st.end(),
11954  refSuballoc,
11955  VmaSuballocationOffsetLess());
11956  if(it != suballocations1st.end())
11957  {
11958  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11959  it->hAllocation = VK_NULL_HANDLE;
11960  ++m_1stNullItemsMiddleCount;
11961  m_SumFreeSize += it->size;
11962  CleanupAfterFree();
11963  return;
11964  }
11965  }
11966 
11967  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11968  {
11969  // Item from the middle of 2nd vector.
11970  VmaSuballocation refSuballoc;
11971  refSuballoc.offset = offset;
11972  // The rest of the members intentionally stays uninitialized for better performance.
11973  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11974  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11975  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11976  if(it != suballocations2nd.end())
11977  {
11978  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11979  it->hAllocation = VK_NULL_HANDLE;
11980  ++m_2ndNullItemsCount;
11981  m_SumFreeSize += it->size;
11982  CleanupAfterFree();
11983  return;
11984  }
11985  }
11986 
11987  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11988 }
11989 
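// The middle-of-vector lookups in FreeAtOffset() are binary searches keyed on
// offset (VmaBinaryFindSorted above). An illustrative reduction of the
// ascending case, reusing the hypothetical ExampleRange type from the
// Validate() sketch:
static size_t ExampleFindByOffset(const ExampleRange* items, size_t count,
    VkDeviceSize offset)
{
    size_t lo = 0, hi = count; // lower_bound on items sorted by ascending offset.
    while(lo < hi)
    {
        const size_t mid = lo + (hi - lo) / 2; // Avoids overflow of lo + hi.
        if(items[mid].offset < offset)
            lo = mid + 1;
        else
            hi = mid;
    }
    // Verify an exact match; count means "not found".
    return (lo < count && items[lo].offset == offset) ? lo : count;
}
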
11990 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11991 {
11992  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11993  const size_t suballocCount = AccessSuballocations1st().size();
11994  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11995 }
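
// Worked numbers for the heuristic above: compaction triggers once null items
// make up at least 60% of the 1st vector (nullItemCount * 2 >= nonNullCount * 3;
// e.g. 60 nulls vs. 40 live items gives 120 >= 120), and never for vectors of
// 32 items or fewer.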
11996 
11997 void VmaBlockMetadata_Linear::CleanupAfterFree()
11998 {
11999  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
12000  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
12001 
12002  if(IsEmpty())
12003  {
12004  suballocations1st.clear();
12005  suballocations2nd.clear();
12006  m_1stNullItemsBeginCount = 0;
12007  m_1stNullItemsMiddleCount = 0;
12008  m_2ndNullItemsCount = 0;
12009  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12010  }
12011  else
12012  {
12013  const size_t suballoc1stCount = suballocations1st.size();
12014  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
12015  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
12016 
12017  // Find more null items at the beginning of 1st vector.
12018  while(m_1stNullItemsBeginCount < suballoc1stCount &&
12019  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
12020  {
12021  ++m_1stNullItemsBeginCount;
12022  --m_1stNullItemsMiddleCount;
12023  }
12024 
12025  // Find more null items at the end of 1st vector.
12026  while(m_1stNullItemsMiddleCount > 0 &&
12027  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
12028  {
12029  --m_1stNullItemsMiddleCount;
12030  suballocations1st.pop_back();
12031  }
12032 
12033  // Find more null items at the end of 2nd vector.
12034  while(m_2ndNullItemsCount > 0 &&
12035  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
12036  {
12037  --m_2ndNullItemsCount;
12038  suballocations2nd.pop_back();
12039  }
12040 
12041  // Find more null items at the beginning of 2nd vector.
12042  while(m_2ndNullItemsCount > 0 &&
12043  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
12044  {
12045  --m_2ndNullItemsCount;
12046  VmaVectorRemove(suballocations2nd, 0);
12047  }
12048 
12049  if(ShouldCompact1st())
12050  {
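 // Compact in place: move every non-null item to the front, preserving their order.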
12051  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
12052  size_t srcIndex = m_1stNullItemsBeginCount;
12053  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
12054  {
12055  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
12056  {
12057  ++srcIndex;
12058  }
12059  if(dstIndex != srcIndex)
12060  {
12061  suballocations1st[dstIndex] = suballocations1st[srcIndex];
12062  }
12063  ++srcIndex;
12064  }
12065  suballocations1st.resize(nonNullItemCount);
12066  m_1stNullItemsBeginCount = 0;
12067  m_1stNullItemsMiddleCount = 0;
12068  }
12069 
12070  // 2nd vector became empty.
12071  if(suballocations2nd.empty())
12072  {
12073  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12074  }
12075 
12076  // 1st vector became empty.
12077  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
12078  {
12079  suballocations1st.clear();
12080  m_1stNullItemsBeginCount = 0;
12081 
12082  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
12083  {
12084  // Swap 1st with 2nd. Now 2nd is empty.
12085  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12086  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
12087  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
12088  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
12089  {
12090  ++m_1stNullItemsBeginCount;
12091  --m_1stNullItemsMiddleCount;
12092  }
12093  m_2ndNullItemsCount = 0;
12094  m_1stVectorIndex ^= 1;
12095  }
12096  }
12097  }
12098 
12099  VMA_HEAVY_ASSERT(Validate());
12100 }
12101 
12102 
12103 ////////////////////////////////////////////////////////////////////////////////
12104 // class VmaBlockMetadata_Buddy
12105 
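/*
Classic buddy allocator over a power-of-2 region: the usable block size is rounded
down to a power of 2 (m_UsableSize), a node at level L has size m_UsableSize >> L,
and every allocation occupies exactly one node. For example, in a 256 MiB block a
5 MiB request is served by splitting free nodes down to the 8 MiB level.
*/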
12106 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
12107  VmaBlockMetadata(hAllocator),
12108  m_Root(VMA_NULL),
12109  m_AllocationCount(0),
12110  m_FreeCount(1),
12111  m_SumFreeSize(0)
12112 {
12113  memset(m_FreeList, 0, sizeof(m_FreeList));
12114 }
12115 
12116 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
12117 {
12118  DeleteNode(m_Root);
12119 }
12120 
12121 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
12122 {
12123  VmaBlockMetadata::Init(size);
12124 
12125  m_UsableSize = VmaPrevPow2(size);
12126  m_SumFreeSize = m_UsableSize;
12127 
12128  // Calculate m_LevelCount.
12129  m_LevelCount = 1;
12130  while(m_LevelCount < MAX_LEVELS &&
12131  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
12132  {
12133  ++m_LevelCount;
12134  }
12135 
12136  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
12137  rootNode->offset = 0;
12138  rootNode->type = Node::TYPE_FREE;
12139  rootNode->parent = VMA_NULL;
12140  rootNode->buddy = VMA_NULL;
12141 
12142  m_Root = rootNode;
12143  AddToFreeListFront(0, rootNode);
12144 }
12145 
12146 bool VmaBlockMetadata_Buddy::Validate() const
12147 {
12148  // Validate tree.
12149  ValidationContext ctx;
12150  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
12151  {
12152  VMA_VALIDATE(false && "ValidateNode failed.");
12153  }
12154  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
12155  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
12156 
12157  // Validate free node lists.
12158  for(uint32_t level = 0; level < m_LevelCount; ++level)
12159  {
12160  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
12161  m_FreeList[level].front->free.prev == VMA_NULL);
12162 
12163  for(Node* node = m_FreeList[level].front;
12164  node != VMA_NULL;
12165  node = node->free.next)
12166  {
12167  VMA_VALIDATE(node->type == Node::TYPE_FREE);
12168 
12169  if(node->free.next == VMA_NULL)
12170  {
12171  VMA_VALIDATE(m_FreeList[level].back == node);
12172  }
12173  else
12174  {
12175  VMA_VALIDATE(node->free.next->free.prev == node);
12176  }
12177  }
12178  }
12179 
12180  // Validate that free lists at higher levels are empty.
12181  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
12182  {
12183  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
12184  }
12185 
12186  return true;
12187 }
12188 
12189 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
12190 {
12191  for(uint32_t level = 0; level < m_LevelCount; ++level)
12192  {
12193  if(m_FreeList[level].front != VMA_NULL)
12194  {
12195  return LevelToNodeSize(level);
12196  }
12197  }
12198  return 0;
12199 }
12200 
12201 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
12202 {
12203  const VkDeviceSize unusableSize = GetUnusableSize();
12204 
12205  outInfo.blockCount = 1;
12206 
12207  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
12208  outInfo.usedBytes = outInfo.unusedBytes = 0;
12209 
12210  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
12211  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
12212  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
12213 
12214  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
12215 
12216  if(unusableSize > 0)
12217  {
12218  ++outInfo.unusedRangeCount;
12219  outInfo.unusedBytes += unusableSize;
12220  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
12221  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
12222  }
12223 }
12224 
12225 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
12226 {
12227  const VkDeviceSize unusableSize = GetUnusableSize();
12228 
12229  inoutStats.size += GetSize();
12230  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
12231  inoutStats.allocationCount += m_AllocationCount;
12232  inoutStats.unusedRangeCount += m_FreeCount;
12233  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
12234 
12235  if(unusableSize > 0)
12236  {
12237  ++inoutStats.unusedRangeCount;
12238  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
12239  }
12240 }
12241 
12242 #if VMA_STATS_STRING_ENABLED
12243 
12244 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
12245 {
12246  // TODO optimize
12247  VmaStatInfo stat;
12248  CalcAllocationStatInfo(stat);
12249 
12250  PrintDetailedMap_Begin(
12251  json,
12252  stat.unusedBytes,
12253  stat.allocationCount,
12254  stat.unusedRangeCount);
12255 
12256  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
12257 
12258  const VkDeviceSize unusableSize = GetUnusableSize();
12259  if(unusableSize > 0)
12260  {
12261  PrintDetailedMap_UnusedRange(json,
12262  m_UsableSize, // offset
12263  unusableSize); // size
12264  }
12265 
12266  PrintDetailedMap_End(json);
12267 }
12268 
12269 #endif // #if VMA_STATS_STRING_ENABLED
12270 
12271 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
12272  uint32_t currentFrameIndex,
12273  uint32_t frameInUseCount,
12274  VkDeviceSize bufferImageGranularity,
12275  VkDeviceSize allocSize,
12276  VkDeviceSize allocAlignment,
12277  bool upperAddress,
12278  VmaSuballocationType allocType,
12279  bool canMakeOtherLost,
12280  uint32_t strategy,
12281  VmaAllocationRequest* pAllocationRequest)
12282 {
12283  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
12284 
12285  // Simple way to respect bufferImageGranularity: whenever the allocation might be
12286  // an OPTIMAL image, raise its alignment and size to at least the granularity. May be optimized some day.
12287  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
12288  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
12289  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
12290  {
12291  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
12292  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
12293  }
12294 
12295  if(allocSize > m_UsableSize)
12296  {
12297  return false;
12298  }
12299 
12300  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12301  for(uint32_t level = targetLevel + 1; level--; )
12302  {
12303  for(Node* freeNode = m_FreeList[level].front;
12304  freeNode != VMA_NULL;
12305  freeNode = freeNode->free.next)
12306  {
12307  if(freeNode->offset % allocAlignment == 0)
12308  {
12309  pAllocationRequest->type = VmaAllocationRequestType::Normal;
12310  pAllocationRequest->offset = freeNode->offset;
12311  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
12312  pAllocationRequest->sumItemSize = 0;
12313  pAllocationRequest->itemsToMakeLostCount = 0;
12314  pAllocationRequest->customData = (void*)(uintptr_t)level;
12315  return true;
12316  }
12317  }
12318  }
12319 
12320  return false;
12321 }
12322 
12323 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
12324  uint32_t currentFrameIndex,
12325  uint32_t frameInUseCount,
12326  VmaAllocationRequest* pAllocationRequest)
12327 {
12328  /*
12329  Lost allocations are not supported in buddy allocator at the moment.
12330  Support might be added in the future.
12331  */
12332  return pAllocationRequest->itemsToMakeLostCount == 0;
12333 }
12334 
12335 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
12336 {
12337  /*
12338  Lost allocations are not supported in buddy allocator at the moment.
12339  Support might be added in the future.
12340  */
12341  return 0;
12342 }
12343 
12344 void VmaBlockMetadata_Buddy::Alloc(
12345  const VmaAllocationRequest& request,
12346  VmaSuballocationType type,
12347  VkDeviceSize allocSize,
12348  VmaAllocation hAllocation)
12349 {
12350  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
12351 
12352  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12353  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
12354 
12355  Node* currNode = m_FreeList[currLevel].front;
12356  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12357  while(currNode->offset != request.offset)
12358  {
12359  currNode = currNode->free.next;
12360  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12361  }
12362 
12363  // Go down, splitting free nodes.
12364  while(currLevel < targetLevel)
12365  {
12366  // currNode is already the first free node at currLevel.
12367  // Remove it from the list of free nodes at this level.
12368  RemoveFromFreeList(currLevel, currNode);
12369 
12370  const uint32_t childrenLevel = currLevel + 1;
12371 
12372  // Create two free sub-nodes.
12373  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
12374  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
12375 
12376  leftChild->offset = currNode->offset;
12377  leftChild->type = Node::TYPE_FREE;
12378  leftChild->parent = currNode;
12379  leftChild->buddy = rightChild;
12380 
12381  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
12382  rightChild->type = Node::TYPE_FREE;
12383  rightChild->parent = currNode;
12384  rightChild->buddy = leftChild;
12385 
12386  // Convert currNode to split type.
12387  currNode->type = Node::TYPE_SPLIT;
12388  currNode->split.leftChild = leftChild;
12389 
12390  // Add child nodes to free list. Order is important!
12391  AddToFreeListFront(childrenLevel, rightChild);
12392  AddToFreeListFront(childrenLevel, leftChild);
12393 
12394  ++m_FreeCount;
12395  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
12396  ++currLevel;
12397  currNode = m_FreeList[currLevel].front;
12398 
12399  /*
12400  We can be sure that currNode, as the left child of the node previously split,
12401  also fulfills the alignment requirement.
12402  */
12403  }
12404 
12405  // Remove from free list.
12406  VMA_ASSERT(currLevel == targetLevel &&
12407  currNode != VMA_NULL &&
12408  currNode->type == Node::TYPE_FREE);
12409  RemoveFromFreeList(currLevel, currNode);
12410 
12411  // Convert to allocation node.
12412  currNode->type = Node::TYPE_ALLOCATION;
12413  currNode->allocation.alloc = hAllocation;
12414 
12415  ++m_AllocationCount;
12416  --m_FreeCount;
12417  m_SumFreeSize -= allocSize;
12418 }
12419 
12420 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
12421 {
12422  if(node->type == Node::TYPE_SPLIT)
12423  {
12424  DeleteNode(node->split.leftChild->buddy);
12425  DeleteNode(node->split.leftChild);
12426  }
12427 
12428  vma_delete(GetAllocationCallbacks(), node);
12429 }
12430 
12431 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
12432 {
12433  VMA_VALIDATE(level < m_LevelCount);
12434  VMA_VALIDATE(curr->parent == parent);
12435  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
12436  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
12437  switch(curr->type)
12438  {
12439  case Node::TYPE_FREE:
12440  // curr->free.prev, next are validated separately.
12441  ctx.calculatedSumFreeSize += levelNodeSize;
12442  ++ctx.calculatedFreeCount;
12443  break;
12444  case Node::TYPE_ALLOCATION:
12445  ++ctx.calculatedAllocationCount;
12446  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
12447  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
12448  break;
12449  case Node::TYPE_SPLIT:
12450  {
12451  const uint32_t childrenLevel = level + 1;
12452  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
12453  const Node* const leftChild = curr->split.leftChild;
12454  VMA_VALIDATE(leftChild != VMA_NULL);
12455  VMA_VALIDATE(leftChild->offset == curr->offset);
12456  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
12457  {
12458  VMA_VALIDATE(false && "ValidateNode for left child failed.");
12459  }
12460  const Node* const rightChild = leftChild->buddy;
12461  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
12462  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
12463  {
12464  VMA_VALIDATE(false && "ValidateNode for right child failed.");
12465  }
12466  }
12467  break;
12468  default:
12469  return false;
12470  }
12471 
12472  return true;
12473 }
12474 
12475 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
12476 {
12477  // I know this could be optimized somehow, e.g. by using std::bit_width (formerly std::log2p1) from C++20.
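 // Returns the deepest level whose node size still holds allocSize; level 0 is the
 // whole usable block and each step down halves the node size.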
12478  uint32_t level = 0;
12479  VkDeviceSize currLevelNodeSize = m_UsableSize;
12480  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
12481  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
12482  {
12483  ++level;
12484  currLevelNodeSize = nextLevelNodeSize;
12485  nextLevelNodeSize = currLevelNodeSize >> 1;
12486  }
12487  return level;
12488 }
12489 
12490 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
12491 {
12492  // Find node and level.
12493  Node* node = m_Root;
12494  VkDeviceSize nodeOffset = 0;
12495  uint32_t level = 0;
12496  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
12497  while(node->type == Node::TYPE_SPLIT)
12498  {
12499  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
12500  if(offset < nodeOffset + nextLevelSize)
12501  {
12502  node = node->split.leftChild;
12503  }
12504  else
12505  {
12506  node = node->split.leftChild->buddy;
12507  nodeOffset += nextLevelSize;
12508  }
12509  ++level;
12510  levelNodeSize = nextLevelSize;
12511  }
12512 
12513  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
12514  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
12515 
12516  ++m_FreeCount;
12517  --m_AllocationCount;
12518  m_SumFreeSize += alloc->GetSize();
12519 
12520  node->type = Node::TYPE_FREE;
12521 
12522  // Join free nodes if possible.
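 // Each iteration merges this node with its free buddy into their parent one level
 // up, so freed space coalesces back toward the root.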
12523  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
12524  {
12525  RemoveFromFreeList(level, node->buddy);
12526  Node* const parent = node->parent;
12527 
12528  vma_delete(GetAllocationCallbacks(), node->buddy);
12529  vma_delete(GetAllocationCallbacks(), node);
12530  parent->type = Node::TYPE_FREE;
12531 
12532  node = parent;
12533  --level;
12534  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
12535  --m_FreeCount;
12536  }
12537 
12538  AddToFreeListFront(level, node);
12539 }
12540 
12541 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
12542 {
12543  switch(node->type)
12544  {
12545  case Node::TYPE_FREE:
12546  ++outInfo.unusedRangeCount;
12547  outInfo.unusedBytes += levelNodeSize;
12548  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
12549  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
12550  break;
12551  case Node::TYPE_ALLOCATION:
12552  {
12553  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12554  ++outInfo.allocationCount;
12555  outInfo.usedBytes += allocSize;
12556  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
12557  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
12558 
12559  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
12560  if(unusedRangeSize > 0)
12561  {
12562  ++outInfo.unusedRangeCount;
12563  outInfo.unusedBytes += unusedRangeSize;
12564  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
12565  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
12566  }
12567  }
12568  break;
12569  case Node::TYPE_SPLIT:
12570  {
12571  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12572  const Node* const leftChild = node->split.leftChild;
12573  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12574  const Node* const rightChild = leftChild->buddy;
12575  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12576  }
12577  break;
12578  default:
12579  VMA_ASSERT(0);
12580  }
12581 }
12582 
12583 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12584 {
12585  VMA_ASSERT(node->type == Node::TYPE_FREE);
12586 
12587  // List is empty.
12588  Node* const frontNode = m_FreeList[level].front;
12589  if(frontNode == VMA_NULL)
12590  {
12591  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12592  node->free.prev = node->free.next = VMA_NULL;
12593  m_FreeList[level].front = m_FreeList[level].back = node;
12594  }
12595  else
12596  {
12597  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12598  node->free.prev = VMA_NULL;
12599  node->free.next = frontNode;
12600  frontNode->free.prev = node;
12601  m_FreeList[level].front = node;
12602  }
12603 }
12604 
12605 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12606 {
12607  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12608 
12609  // It is at the front.
12610  if(node->free.prev == VMA_NULL)
12611  {
12612  VMA_ASSERT(m_FreeList[level].front == node);
12613  m_FreeList[level].front = node->free.next;
12614  }
12615  else
12616  {
12617  Node* const prevFreeNode = node->free.prev;
12618  VMA_ASSERT(prevFreeNode->free.next == node);
12619  prevFreeNode->free.next = node->free.next;
12620  }
12621 
12622  // It is at the back.
12623  if(node->free.next == VMA_NULL)
12624  {
12625  VMA_ASSERT(m_FreeList[level].back == node);
12626  m_FreeList[level].back = node->free.prev;
12627  }
12628  else
12629  {
12630  Node* const nextFreeNode = node->free.next;
12631  VMA_ASSERT(nextFreeNode->free.prev == node);
12632  nextFreeNode->free.prev = node->free.prev;
12633  }
12634 }
12635 
12636 #if VMA_STATS_STRING_ENABLED
12637 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12638 {
12639  switch(node->type)
12640  {
12641  case Node::TYPE_FREE:
12642  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12643  break;
12644  case Node::TYPE_ALLOCATION:
12645  {
12646  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12647  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12648  if(allocSize < levelNodeSize)
12649  {
12650  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12651  }
12652  }
12653  break;
12654  case Node::TYPE_SPLIT:
12655  {
12656  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12657  const Node* const leftChild = node->split.leftChild;
12658  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12659  const Node* const rightChild = leftChild->buddy;
12660  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12661  }
12662  break;
12663  default:
12664  VMA_ASSERT(0);
12665  }
12666 }
12667 #endif // #if VMA_STATS_STRING_ENABLED
12668 
12669 
12670 ////////////////////////////////////////////////////////////////////////////////
12671 // class VmaDeviceMemoryBlock
12672 
12673 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12674  m_pMetadata(VMA_NULL),
12675  m_MemoryTypeIndex(UINT32_MAX),
12676  m_Id(0),
12677  m_hMemory(VK_NULL_HANDLE),
12678  m_MapCount(0),
12679  m_pMappedData(VMA_NULL)
12680 {
12681 }
12682 
12683 void VmaDeviceMemoryBlock::Init(
12684  VmaAllocator hAllocator,
12685  VmaPool hParentPool,
12686  uint32_t newMemoryTypeIndex,
12687  VkDeviceMemory newMemory,
12688  VkDeviceSize newSize,
12689  uint32_t id,
12690  uint32_t algorithm)
12691 {
12692  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12693 
12694  m_hParentPool = hParentPool;
12695  m_MemoryTypeIndex = newMemoryTypeIndex;
12696  m_Id = id;
12697  m_hMemory = newMemory;
12698 
12699  switch(algorithm)
12700  {
12701  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
12702  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
12703  break;
12704  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
12705  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
12706  break;
12707  default:
12708  VMA_ASSERT(0);
12709  // Fall-through.
12710  case 0:
12711  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12712  }
12713  m_pMetadata->Init(newSize);
12714 }
12715 
12716 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12717 {
12718  // This is the most important assert in the entire library.
12719  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12720  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12721 
12722  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12723  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12724  m_hMemory = VK_NULL_HANDLE;
12725 
12726  vma_delete(allocator, m_pMetadata);
12727  m_pMetadata = VMA_NULL;
12728 }
12729 
12730 bool VmaDeviceMemoryBlock::Validate() const
12731 {
12732  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12733  (m_pMetadata->GetSize() != 0));
12734 
12735  return m_pMetadata->Validate();
12736 }
12737 
12738 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12739 {
12740  void* pData = nullptr;
12741  VkResult res = Map(hAllocator, 1, &pData);
12742  if(res != VK_SUCCESS)
12743  {
12744  return res;
12745  }
12746 
12747  res = m_pMetadata->CheckCorruption(pData);
12748 
12749  Unmap(hAllocator, 1);
12750 
12751  return res;
12752 }
12753 
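 // Mapping is reference-counted per block: nested Map() calls only bump m_MapCount;
 // vkMapMemory runs on the 0 -> 1 transition and vkUnmapMemory on the matching
 // 1 -> 0 transition in Unmap().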
12754 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12755 {
12756  if(count == 0)
12757  {
12758  return VK_SUCCESS;
12759  }
12760 
12761  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12762  if(m_MapCount != 0)
12763  {
12764  m_MapCount += count;
12765  VMA_ASSERT(m_pMappedData != VMA_NULL);
12766  if(ppData != VMA_NULL)
12767  {
12768  *ppData = m_pMappedData;
12769  }
12770  return VK_SUCCESS;
12771  }
12772  else
12773  {
12774  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12775  hAllocator->m_hDevice,
12776  m_hMemory,
12777  0, // offset
12778  VK_WHOLE_SIZE,
12779  0, // flags
12780  &m_pMappedData);
12781  if(result == VK_SUCCESS)
12782  {
12783  if(ppData != VMA_NULL)
12784  {
12785  *ppData = m_pMappedData;
12786  }
12787  m_MapCount = count;
12788  }
12789  return result;
12790  }
12791 }
12792 
12793 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12794 {
12795  if(count == 0)
12796  {
12797  return;
12798  }
12799 
12800  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12801  if(m_MapCount >= count)
12802  {
12803  m_MapCount -= count;
12804  if(m_MapCount == 0)
12805  {
12806  m_pMappedData = VMA_NULL;
12807  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12808  }
12809  }
12810  else
12811  {
12812  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12813  }
12814 }
12815 
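 // Debug canaries: the VMA_DEBUG_MARGIN bytes directly before and after the
 // allocation are stamped with a magic value here and re-checked on free in
 // ValidateMagicValueAroundAllocation() to catch out-of-bounds writes.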
12816 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12817 {
12818  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12819  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12820 
12821  void* pData;
12822  VkResult res = Map(hAllocator, 1, &pData);
12823  if(res != VK_SUCCESS)
12824  {
12825  return res;
12826  }
12827 
12828  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12829  VmaWriteMagicValue(pData, allocOffset + allocSize);
12830 
12831  Unmap(hAllocator, 1);
12832 
12833  return VK_SUCCESS;
12834 }
12835 
12836 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12837 {
12838  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12839  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12840 
12841  void* pData;
12842  VkResult res = Map(hAllocator, 1, &pData);
12843  if(res != VK_SUCCESS)
12844  {
12845  return res;
12846  }
12847 
12848  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12849  {
12850  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12851  }
12852  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12853  {
12854  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12855  }
12856 
12857  Unmap(hAllocator, 1);
12858 
12859  return VK_SUCCESS;
12860 }
12861 
12862 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12863  const VmaAllocator hAllocator,
12864  const VmaAllocation hAllocation,
12865  VkDeviceSize allocationLocalOffset,
12866  VkBuffer hBuffer,
12867  const void* pNext)
12868 {
12869  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12870  hAllocation->GetBlock() == this);
12871  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12872  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12873  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12874  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12875  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12876  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12877 }
12878 
12879 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12880  const VmaAllocator hAllocator,
12881  const VmaAllocation hAllocation,
12882  VkDeviceSize allocationLocalOffset,
12883  VkImage hImage,
12884  const void* pNext)
12885 {
12886  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12887  hAllocation->GetBlock() == this);
12888  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12889  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12890  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12891  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12892  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12893  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12894 }
12895 
12896 static void InitStatInfo(VmaStatInfo& outInfo)
12897 {
12898  memset(&outInfo, 0, sizeof(outInfo));
12899  outInfo.allocationSizeMin = UINT64_MAX;
12900  outInfo.unusedRangeSizeMin = UINT64_MAX;
12901 }
12902 
12903 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12904 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12905 {
12906  inoutInfo.blockCount += srcInfo.blockCount;
12907  inoutInfo.allocationCount += srcInfo.allocationCount;
12908  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12909  inoutInfo.usedBytes += srcInfo.usedBytes;
12910  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12911  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12912  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12913  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12914  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12915 }
12916 
12917 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12918 {
12919  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12920  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12921  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12922  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12923 }
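 // Typical aggregation (a sketch): InitStatInfo(total); then VmaAddStatInfo(total, s)
 // for each per-block VmaStatInfo s; finally VmaPostprocessCalcStatInfo(total) fills
 // the Avg fields, since averages cannot be merged incrementally.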
12924 
12925 VmaPool_T::VmaPool_T(
12926  VmaAllocator hAllocator,
12927  const VmaPoolCreateInfo& createInfo,
12928  VkDeviceSize preferredBlockSize) :
12929  m_BlockVector(
12930  hAllocator,
12931  this, // hParentPool
12932  createInfo.memoryTypeIndex,
12933  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12934  createInfo.minBlockCount,
12935  createInfo.maxBlockCount,
12936  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12937  createInfo.frameInUseCount,
12938  createInfo.blockSize != 0, // explicitBlockSize
12939  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
12940  createInfo.priority,
12941  VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
12942  createInfo.pMemoryAllocateNext),
12943  m_Id(0),
12944  m_Name(VMA_NULL)
12945 {
12946 }
12947 
12948 VmaPool_T::~VmaPool_T()
12949 {
12950  VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
12951 }
12952 
12953 void VmaPool_T::SetName(const char* pName)
12954 {
12955  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12956  VmaFreeString(allocs, m_Name);
12957 
12958  if(pName != VMA_NULL)
12959  {
12960  m_Name = VmaCreateStringCopy(allocs, pName);
12961  }
12962  else
12963  {
12964  m_Name = VMA_NULL;
12965  }
12966 }
12967 
12968 #if VMA_STATS_STRING_ENABLED
12969 
12970 #endif // #if VMA_STATS_STRING_ENABLED
12971 
12972 VmaBlockVector::VmaBlockVector(
12973  VmaAllocator hAllocator,
12974  VmaPool hParentPool,
12975  uint32_t memoryTypeIndex,
12976  VkDeviceSize preferredBlockSize,
12977  size_t minBlockCount,
12978  size_t maxBlockCount,
12979  VkDeviceSize bufferImageGranularity,
12980  uint32_t frameInUseCount,
12981  bool explicitBlockSize,
12982  uint32_t algorithm,
12983  float priority,
12984  VkDeviceSize minAllocationAlignment,
12985  void* pMemoryAllocateNext) :
12986  m_hAllocator(hAllocator),
12987  m_hParentPool(hParentPool),
12988  m_MemoryTypeIndex(memoryTypeIndex),
12989  m_PreferredBlockSize(preferredBlockSize),
12990  m_MinBlockCount(minBlockCount),
12991  m_MaxBlockCount(maxBlockCount),
12992  m_BufferImageGranularity(bufferImageGranularity),
12993  m_FrameInUseCount(frameInUseCount),
12994  m_ExplicitBlockSize(explicitBlockSize),
12995  m_Algorithm(algorithm),
12996  m_Priority(priority),
12997  m_MinAllocationAlignment(minAllocationAlignment),
12998  m_pMemoryAllocateNext(pMemoryAllocateNext),
12999  m_HasEmptyBlock(false),
13000  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
13001  m_NextBlockId(0)
13002 {
13003 }
13004 
13005 VmaBlockVector::~VmaBlockVector()
13006 {
13007  for(size_t i = m_Blocks.size(); i--; )
13008  {
13009  m_Blocks[i]->Destroy(m_hAllocator);
13010  vma_delete(m_hAllocator, m_Blocks[i]);
13011  }
13012 }
13013 
13014 VkResult VmaBlockVector::CreateMinBlocks()
13015 {
13016  for(size_t i = 0; i < m_MinBlockCount; ++i)
13017  {
13018  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
13019  if(res != VK_SUCCESS)
13020  {
13021  return res;
13022  }
13023  }
13024  return VK_SUCCESS;
13025 }
13026 
13027 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
13028 {
13029  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13030 
13031  const size_t blockCount = m_Blocks.size();
13032 
13033  pStats->size = 0;
13034  pStats->unusedSize = 0;
13035  pStats->allocationCount = 0;
13036  pStats->unusedRangeCount = 0;
13037  pStats->unusedRangeSizeMax = 0;
13038  pStats->blockCount = blockCount;
13039 
13040  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13041  {
13042  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13043  VMA_ASSERT(pBlock);
13044  VMA_HEAVY_ASSERT(pBlock->Validate());
13045  pBlock->m_pMetadata->AddPoolStats(*pStats);
13046  }
13047 }
13048 
13049 bool VmaBlockVector::IsEmpty()
13050 {
13051  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13052  return m_Blocks.empty();
13053 }
13054 
13055 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
13056 {
13057  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13058  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
13059  (VMA_DEBUG_MARGIN > 0) &&
13060  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
13061  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
13062 }
13063 
13064 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
13065 
13066 VkResult VmaBlockVector::Allocate(
13067  uint32_t currentFrameIndex,
13068  VkDeviceSize size,
13069  VkDeviceSize alignment,
13070  const VmaAllocationCreateInfo& createInfo,
13071  VmaSuballocationType suballocType,
13072  size_t allocationCount,
13073  VmaAllocation* pAllocations)
13074 {
13075  size_t allocIndex;
13076  VkResult res = VK_SUCCESS;
13077 
13078  alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
13079 
13080  if(IsCorruptionDetectionEnabled())
13081  {
13082  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
13083  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
13084  }
13085 
13086  {
13087  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13088  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13089  {
13090  res = AllocatePage(
13091  currentFrameIndex,
13092  size,
13093  alignment,
13094  createInfo,
13095  suballocType,
13096  pAllocations + allocIndex);
13097  if(res != VK_SUCCESS)
13098  {
13099  break;
13100  }
13101  }
13102  }
13103 
13104  if(res != VK_SUCCESS)
13105  {
13106  // Free all already created allocations.
13107  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13108  while(allocIndex--)
13109  {
13110  VmaAllocation_T* const alloc = pAllocations[allocIndex];
13111  const VkDeviceSize allocSize = alloc->GetSize();
13112  Free(alloc);
13113  m_hAllocator->m_Budget.RemoveAllocation(heapIndex, allocSize);
13114  }
13115  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
13116  }
13117 
13118  return res;
13119 }
13120 
13121 VkResult VmaBlockVector::AllocatePage(
13122  uint32_t currentFrameIndex,
13123  VkDeviceSize size,
13124  VkDeviceSize alignment,
13125  const VmaAllocationCreateInfo& createInfo,
13126  VmaSuballocationType suballocType,
13127  VmaAllocation* pAllocation)
13128 {
13129  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13130  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
13131  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13132  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13133 
13134  VkDeviceSize freeMemory;
13135  {
13136  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13137  VmaBudget heapBudget = {};
13138  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13139  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
13140  }
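 // freeMemory is the remaining budget of this allocation's memory heap. Below, new
 // blocks are only created within that budget, unless the pool cannot fall back to
 // a dedicated allocation anyway (custom pools), in which case it may be exceeded.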
13141 
13142  const bool canFallbackToDedicated = !IsCustomPool();
13143  const bool canCreateNewBlock =
13144  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
13145  (m_Blocks.size() < m_MaxBlockCount) &&
13146  (freeMemory >= size || !canFallbackToDedicated);
13147  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
13148 
13149  // If the linear algorithm is used, canMakeOtherLost is available only when the block
13150  // is used as a ring buffer, which in turn is available only when maxBlockCount = 1.
13151  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
13152  {
13153  canMakeOtherLost = false;
13154  }
13155 
13156  // Upper address can only be used with linear allocator and within single memory block.
13157  if(isUpperAddress &&
13158  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
13159  {
13160  return VK_ERROR_FEATURE_NOT_PRESENT;
13161  }
13162 
13163  // Validate strategy.
13164  switch(strategy)
13165  {
13166  case 0:
13167  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
13168  break;
13169  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
13170  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
13171  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
13172  break;
13173  default:
13174  return VK_ERROR_FEATURE_NOT_PRESENT;
13175  }
13176 
13177  // Early reject: the requested allocation size is larger than the maximum block size for this block vector.
13178  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
13179  {
13180  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13181  }
13182 
13183  /*
13184  Under certain conditions, this whole section can be skipped for optimization, so
13185  we move on directly to trying to allocate with canMakeOtherLost. That's the case
13186  e.g. for custom pools with the linear algorithm.
13187  */
13188  if(!canMakeOtherLost || canCreateNewBlock)
13189  {
13190  // 1. Search existing allocations. Try to allocate without making other allocations lost.
13191  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
13192  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
13193 
13194  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13195  {
13196  // Use only last block.
13197  if(!m_Blocks.empty())
13198  {
13199  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
13200  VMA_ASSERT(pCurrBlock);
13201  VkResult res = AllocateFromBlock(
13202  pCurrBlock,
13203  currentFrameIndex,
13204  size,
13205  alignment,
13206  allocFlagsCopy,
13207  createInfo.pUserData,
13208  suballocType,
13209  strategy,
13210  pAllocation);
13211  if(res == VK_SUCCESS)
13212  {
13213  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
13214  return VK_SUCCESS;
13215  }
13216  }
13217  }
13218  else
13219  {
13220  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13221  {
13222  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13223  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13224  {
13225  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13226  VMA_ASSERT(pCurrBlock);
13227  VkResult res = AllocateFromBlock(
13228  pCurrBlock,
13229  currentFrameIndex,
13230  size,
13231  alignment,
13232  allocFlagsCopy,
13233  createInfo.pUserData,
13234  suballocType,
13235  strategy,
13236  pAllocation);
13237  if(res == VK_SUCCESS)
13238  {
13239  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
13240  return VK_SUCCESS;
13241  }
13242  }
13243  }
13244  else // WORST_FIT, FIRST_FIT
13245  {
13246  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13247  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13248  {
13249  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13250  VMA_ASSERT(pCurrBlock);
13251  VkResult res = AllocateFromBlock(
13252  pCurrBlock,
13253  currentFrameIndex,
13254  size,
13255  alignment,
13256  allocFlagsCopy,
13257  createInfo.pUserData,
13258  suballocType,
13259  strategy,
13260  pAllocation);
13261  if(res == VK_SUCCESS)
13262  {
13263  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
13264  return VK_SUCCESS;
13265  }
13266  }
13267  }
13268  }
13269 
13270  // 2. Try to create new block.
13271  if(canCreateNewBlock)
13272  {
13273  // Calculate optimal size for new block.
13274  VkDeviceSize newBlockSize = m_PreferredBlockSize;
13275  uint32_t newBlockSizeShift = 0;
13276  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
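 // Example: with m_PreferredBlockSize = 256 MiB and no existing blocks, a small
 // first allocation creates a 32 MiB block (3 halvings); later blocks grow back
 // toward the preferred size as CalcMaxBlockSize() rises.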
13277 
13278  if(!m_ExplicitBlockSize)
13279  {
13280  // Allocate 1/8, 1/4, 1/2 as first blocks.
13281  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
13282  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
13283  {
13284  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
13285  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
13286  {
13287  newBlockSize = smallerNewBlockSize;
13288  ++newBlockSizeShift;
13289  }
13290  else
13291  {
13292  break;
13293  }
13294  }
13295  }
13296 
13297  size_t newBlockIndex = 0;
13298  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
13299  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
13300  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
13301  if(!m_ExplicitBlockSize)
13302  {
13303  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
13304  {
13305  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
13306  if(smallerNewBlockSize >= size)
13307  {
13308  newBlockSize = smallerNewBlockSize;
13309  ++newBlockSizeShift;
13310  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
13311  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
13312  }
13313  else
13314  {
13315  break;
13316  }
13317  }
13318  }
13319 
13320  if(res == VK_SUCCESS)
13321  {
13322  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
13323  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
13324 
13325  res = AllocateFromBlock(
13326  pBlock,
13327  currentFrameIndex,
13328  size,
13329  alignment,
13330  allocFlagsCopy,
13331  createInfo.pUserData,
13332  suballocType,
13333  strategy,
13334  pAllocation);
13335  if(res == VK_SUCCESS)
13336  {
13337  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
13338  return VK_SUCCESS;
13339  }
13340  else
13341  {
13342  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
13343  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13344  }
13345  }
13346  }
13347  }
13348 
13349  // 3. Try to allocate from existing blocks while making other allocations lost.
13350  if(canMakeOtherLost)
13351  {
13352  uint32_t tryIndex = 0;
13353  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
13354  {
13355  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
13356  VmaAllocationRequest bestRequest = {};
13357  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
13358 
13359  // 1. Search existing allocations.
13360  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13361  {
13362  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13363  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13364  {
13365  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13366  VMA_ASSERT(pCurrBlock);
13367  VmaAllocationRequest currRequest = {};
13368  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13369  currentFrameIndex,
13370  m_FrameInUseCount,
13371  m_BufferImageGranularity,
13372  size,
13373  alignment,
13374  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13375  suballocType,
13376  canMakeOtherLost,
13377  strategy,
13378  &currRequest))
13379  {
13380  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13381  if(pBestRequestBlock == VMA_NULL ||
13382  currRequestCost < bestRequestCost)
13383  {
13384  pBestRequestBlock = pCurrBlock;
13385  bestRequest = currRequest;
13386  bestRequestCost = currRequestCost;
13387 
13388  if(bestRequestCost == 0)
13389  {
13390  break;
13391  }
13392  }
13393  }
13394  }
13395  }
13396  else // WORST_FIT, FIRST_FIT
13397  {
13398  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13399  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13400  {
13401  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13402  VMA_ASSERT(pCurrBlock);
13403  VmaAllocationRequest currRequest = {};
13404  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13405  currentFrameIndex,
13406  m_FrameInUseCount,
13407  m_BufferImageGranularity,
13408  size,
13409  alignment,
13410  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13411  suballocType,
13412  canMakeOtherLost,
13413  strategy,
13414  &currRequest))
13415  {
13416  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13417  if(pBestRequestBlock == VMA_NULL ||
13418  currRequestCost < bestRequestCost ||
13419  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13420  {
13421  pBestRequestBlock = pCurrBlock;
13422  bestRequest = currRequest;
13423  bestRequestCost = currRequestCost;
13424 
13425  if(bestRequestCost == 0 ||
13426  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13427  {
13428  break;
13429  }
13430  }
13431  }
13432  }
13433  }
13434 
13435  if(pBestRequestBlock != VMA_NULL)
13436  {
13437  if(mapped)
13438  {
13439  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
13440  if(res != VK_SUCCESS)
13441  {
13442  return res;
13443  }
13444  }
13445 
13446  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
13447  currentFrameIndex,
13448  m_FrameInUseCount,
13449  &bestRequest))
13450  {
13451  // Allocate from this pBlock.
13452  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13453  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
13454  UpdateHasEmptyBlock();
13455  (*pAllocation)->InitBlockAllocation(
13456  pBestRequestBlock,
13457  bestRequest.offset,
13458  alignment,
13459  size,
13460  m_MemoryTypeIndex,
13461  suballocType,
13462  mapped,
13463  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13464  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
13465  VMA_DEBUG_LOG(" Returned from existing block");
13466  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
13467  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13468  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13469  {
13470  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13471  }
13472  if(IsCorruptionDetectionEnabled())
13473  {
13474  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
13475  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13476  }
13477  return VK_SUCCESS;
13478  }
13479  // else: Some allocations must have been touched while we are here. Next try.
13480  }
13481  else
13482  {
13483  // Could not find place in any of the blocks - break outer loop.
13484  break;
13485  }
13486  }
13487  /* Maximum number of tries exceeded - a very unlikely event that occurs only when
13488  many other threads are simultaneously touching allocations, making it impossible
13489  to mark them as lost while we try to allocate. */
13490  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
13491  {
13492  return VK_ERROR_TOO_MANY_OBJECTS;
13493  }
13494  }
13495 
13496  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13497 }
13498 
13499 void VmaBlockVector::Free(
13500  const VmaAllocation hAllocation)
13501 {
13502  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
13503 
13504  bool budgetExceeded = false;
13505  {
13506  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13507  VmaBudget heapBudget = {};
13508  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13509  budgetExceeded = heapBudget.usage >= heapBudget.budget;
13510  }
13511 
13512  // Scope for lock.
13513  {
13514  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13515 
13516  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13517 
13518  if(IsCorruptionDetectionEnabled())
13519  {
13520  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
13521  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
13522  }
13523 
13524  if(hAllocation->IsPersistentMap())
13525  {
13526  pBlock->Unmap(m_hAllocator, 1);
13527  }
13528 
13529  pBlock->m_pMetadata->Free(hAllocation);
13530  VMA_HEAVY_ASSERT(pBlock->Validate());
13531 
13532  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
13533 
13534  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
13535  // pBlock became empty after this deallocation.
13536  if(pBlock->m_pMetadata->IsEmpty())
13537  {
13538  // Already has empty block. We don't want to have two, so delete this one.
13539  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
13540  {
13541  pBlockToDelete = pBlock;
13542  Remove(pBlock);
13543  }
13544  // else: We now have an empty block - leave it.
13545  }
13546  // pBlock didn't become empty, but we have another empty block - find and free that one.
13547  // (This is an optional heuristic.)
13548  else if(m_HasEmptyBlock && canDeleteBlock)
13549  {
13550  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
13551  if(pLastBlock->m_pMetadata->IsEmpty())
13552  {
13553  pBlockToDelete = pLastBlock;
13554  m_Blocks.pop_back();
13555  }
13556  }
13557 
13558  UpdateHasEmptyBlock();
13559  IncrementallySortBlocks();
13560  }
13561 
13562  // Destruction of a free block. Deferred until this point, outside of the mutex
13563  // lock, for performance reasons.
13564  if(pBlockToDelete != VMA_NULL)
13565  {
13566  VMA_DEBUG_LOG(" Deleted empty block");
13567  pBlockToDelete->Destroy(m_hAllocator);
13568  vma_delete(m_hAllocator, pBlockToDelete);
13569  }
13570 }
13571 
13572 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
13573 {
13574  VkDeviceSize result = 0;
13575  for(size_t i = m_Blocks.size(); i--; )
13576  {
13577  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
13578  if(result >= m_PreferredBlockSize)
13579  {
13580  break;
13581  }
13582  }
13583  return result;
13584 }
13585 
13586 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13587 {
13588  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13589  {
13590  if(m_Blocks[blockIndex] == pBlock)
13591  {
13592  VmaVectorRemove(m_Blocks, blockIndex);
13593  return;
13594  }
13595  }
13596  VMA_ASSERT(0);
13597 }
13598 
13599 void VmaBlockVector::IncrementallySortBlocks()
13600 {
13601  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13602  {
13603  // Bubble sort only until first swap.
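 // One swap per call is enough: Free() invokes this after every deallocation, so
 // m_Blocks converges to ascending order of free space across successive frees.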
13604  for(size_t i = 1; i < m_Blocks.size(); ++i)
13605  {
13606  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13607  {
13608  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13609  return;
13610  }
13611  }
13612  }
13613 }
13614 
13615 VkResult VmaBlockVector::AllocateFromBlock(
13616  VmaDeviceMemoryBlock* pBlock,
13617  uint32_t currentFrameIndex,
13618  VkDeviceSize size,
13619  VkDeviceSize alignment,
13620  VmaAllocationCreateFlags allocFlags,
13621  void* pUserData,
13622  VmaSuballocationType suballocType,
13623  uint32_t strategy,
13624  VmaAllocation* pAllocation)
13625 {
13626  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13627  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13628  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13629  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13630 
13631  VmaAllocationRequest currRequest = {};
13632  if(pBlock->m_pMetadata->CreateAllocationRequest(
13633  currentFrameIndex,
13634  m_FrameInUseCount,
13635  m_BufferImageGranularity,
13636  size,
13637  alignment,
13638  isUpperAddress,
13639  suballocType,
13640  false, // canMakeOtherLost
13641  strategy,
13642  &currRequest))
13643  {
13644  // Allocate from pCurrBlock.
13645  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13646 
13647  if(mapped)
13648  {
13649  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13650  if(res != VK_SUCCESS)
13651  {
13652  return res;
13653  }
13654  }
13655 
13656  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13657  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13658  UpdateHasEmptyBlock();
13659  (*pAllocation)->InitBlockAllocation(
13660  pBlock,
13661  currRequest.offset,
13662  alignment,
13663  size,
13664  m_MemoryTypeIndex,
13665  suballocType,
13666  mapped,
13667  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13668  VMA_HEAVY_ASSERT(pBlock->Validate());
13669  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13670  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13671  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13672  {
13673  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13674  }
13675  if(IsCorruptionDetectionEnabled())
13676  {
13677  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13678  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13679  }
13680  return VK_SUCCESS;
13681  }
13682  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13683 }
13684 
13685 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13686 {
13687  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13688  allocInfo.pNext = m_pMemoryAllocateNext;
13689  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13690  allocInfo.allocationSize = blockSize;
13691 
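 // Optional extension structs below are pushed onto allocInfo.pNext via
 // VmaPnextChainPushFront, so the final VkMemoryAllocateInfo carries device-address
 // flags, priority, or export info only when the corresponding feature is enabled.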
13692 #if VMA_BUFFER_DEVICE_ADDRESS
13693  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13694  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13695  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13696  {
13697  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13698  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13699  }
13700 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13701 
13702 #if VMA_MEMORY_PRIORITY
13703  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
13704  if(m_hAllocator->m_UseExtMemoryPriority)
13705  {
13706  priorityInfo.priority = m_Priority;
13707  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
13708  }
13709 #endif // #if VMA_MEMORY_PRIORITY
13710 
13711 #if VMA_EXTERNAL_MEMORY
13712  // Attach VkExportMemoryAllocateInfoKHR if necessary.
13713  VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
13714  exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
13715  if(exportMemoryAllocInfo.handleTypes != 0)
13716  {
13717  VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
13718  }
13719 #endif // #if VMA_EXTERNAL_MEMORY
13720 
13721  VkDeviceMemory mem = VK_NULL_HANDLE;
13722  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13723  if(res < 0)
13724  {
13725  return res;
13726  }
13727 
13728  // New VkDeviceMemory successfully created.
13729 
13730  // Create a new VmaDeviceMemoryBlock object for it.
13731  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13732  pBlock->Init(
13733  m_hAllocator,
13734  m_hParentPool,
13735  m_MemoryTypeIndex,
13736  mem,
13737  allocInfo.allocationSize,
13738  m_NextBlockId++,
13739  m_Algorithm);
13740 
13741  m_Blocks.push_back(pBlock);
13742  if(pNewBlockIndex != VMA_NULL)
13743  {
13744  *pNewBlockIndex = m_Blocks.size() - 1;
13745  }
13746 
13747  return VK_SUCCESS;
13748 }
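// Rough sketch of the resulting pNext chain when every optional extension is
// in use (assuming VmaPnextChainPushFront prepends to the chain, as the name
// suggests):
//
//   allocInfo.pNext -> exportMemoryAllocInfo -> priorityInfo -> allocFlagsInfo
//                   -> m_pMemoryAllocateNext (user-provided chain, may be null)
//
// Structures are pushed front-first, so the one pushed last is seen first by
// the driver.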
13749 
13750 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13751  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13752  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13753 {
13754  const size_t blockCount = m_Blocks.size();
13755  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13756 
13757  enum BLOCK_FLAG
13758  {
13759  BLOCK_FLAG_USED = 0x00000001,
13760  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13761  };
13762 
13763  struct BlockInfo
13764  {
13765  uint32_t flags;
13766  void* pMappedData;
13767  };
13768  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13769  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13770  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13771 
13772  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13773  const size_t moveCount = moves.size();
13774  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13775  {
13776  const VmaDefragmentationMove& move = moves[moveIndex];
13777  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13778  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13779  }
13780 
13781  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13782 
13783  // Go over all blocks. Get mapped pointer or map if necessary.
13784  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13785  {
13786  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13787  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13788  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13789  {
13790  currBlockInfo.pMappedData = pBlock->GetMappedData();
13791  // If it is not already mapped, map it now.
13792  if(currBlockInfo.pMappedData == VMA_NULL)
13793  {
13794  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13795  if(pDefragCtx->res == VK_SUCCESS)
13796  {
13797  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13798  }
13799  }
13800  }
13801  }
13802 
13803  // Go over all moves. Do actual data transfer.
13804  if(pDefragCtx->res == VK_SUCCESS)
13805  {
13806  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13807  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13808 
13809  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13810  {
13811  const VmaDefragmentationMove& move = moves[moveIndex];
13812 
13813  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13814  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13815 
13816  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13817 
13818  // Invalidate source.
13819  if(isNonCoherent)
13820  {
13821  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13822  memRange.memory = pSrcBlock->GetDeviceMemory();
13823  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13824  memRange.size = VMA_MIN(
13825  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13826  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13827  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13828  }
13829 
13830  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13831  memmove(
13832  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13833  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13834  static_cast<size_t>(move.size));
13835 
13836  if(IsCorruptionDetectionEnabled())
13837  {
13838  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13839  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13840  }
13841 
13842  // Flush destination.
13843  if(isNonCoherent)
13844  {
13845  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13846  memRange.memory = pDstBlock->GetDeviceMemory();
13847  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13848  memRange.size = VMA_MIN(
13849  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13850  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13851  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13852  }
13853  }
13854  }
13855 
13856  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
13857  // This runs regardless of whether pDefragCtx->res == VK_SUCCESS.
13858  for(size_t blockIndex = blockCount; blockIndex--; )
13859  {
13860  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13861  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13862  {
13863  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13864  pBlock->Unmap(m_hAllocator, 1);
13865  }
13866  }
13867 }
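// Worked example of the nonCoherentAtomSize rounding above (illustrative
// numbers): with nonCoherentAtomSize = 64, a move of size 100 at srcOffset 200
// gives
//   memRange.offset = VmaAlignDown(200, 64) = 192
//   memRange.size   = VmaAlignUp(100 + (200 - 192), 64) = VmaAlignUp(108, 64) = 128
// so the invalidated range [192, 320) covers the moved bytes [200, 300) while
// staying atom-aligned, and the VMA_MIN clamp keeps it inside the block, as the
// Vulkan spec requires for vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges.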
13868 
13869 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13870  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13871  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13872  VkCommandBuffer commandBuffer)
13873 {
13874  const size_t blockCount = m_Blocks.size();
13875 
13876  pDefragCtx->blockContexts.resize(blockCount);
13877  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13878 
13879  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13880  const size_t moveCount = moves.size();
13881  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13882  {
13883  const VmaDefragmentationMove& move = moves[moveIndex];
13884 
13885  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13886  {
13887  // Old-school moves still require us to map the whole block.
13888  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13889  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13890  }
13891  }
13892 
13893  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13894 
13895  // Go over all blocks. Create and bind buffer for whole block if necessary.
13896  {
13897  VkBufferCreateInfo bufCreateInfo;
13898  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13899 
13900  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13901  {
13902  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13903  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13904  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13905  {
13906  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13907  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13908  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13909  if(pDefragCtx->res == VK_SUCCESS)
13910  {
13911  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13912  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13913  }
13914  }
13915  }
13916  }
13917 
13918  // Go over all moves. Post data transfer commands to command buffer.
13919  if(pDefragCtx->res == VK_SUCCESS)
13920  {
13921  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13922  {
13923  const VmaDefragmentationMove& move = moves[moveIndex];
13924 
13925  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13926  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13927 
13928  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13929 
13930  VkBufferCopy region = {
13931  move.srcOffset,
13932  move.dstOffset,
13933  move.size };
13934  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13935  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13936  }
13937  }
13938 
13939  // Save buffers to defrag context for later destruction.
13940  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13941  {
13942  pDefragCtx->res = VK_NOT_READY;
13943  }
13944 }
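// The GPU path never maps memory: each block touched by a move is wrapped in a
// temporary whole-block VkBuffer (with the transfer usage flags filled in by
// VmaFillGpuDefragmentationBufferCreateInfo) and every move becomes a
// vkCmdCopyBuffer region recorded into the caller's command buffer. res is set
// to VK_NOT_READY because the copies still have to be submitted and completed
// before DefragmentationEnd() destroys the buffers and the moves take effect.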
13945 
13946 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13947 {
13948  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13949  {
13950  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13951  if(pBlock->m_pMetadata->IsEmpty())
13952  {
13953  if(m_Blocks.size() > m_MinBlockCount)
13954  {
13955  if(pDefragmentationStats != VMA_NULL)
13956  {
13957  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13958  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13959  }
13960 
13961  VmaVectorRemove(m_Blocks, blockIndex);
13962  pBlock->Destroy(m_hAllocator);
13963  vma_delete(m_hAllocator, pBlock);
13964  }
13965  else
13966  {
13967  break;
13968  }
13969  }
13970  }
13971  UpdateHasEmptyBlock();
13972 }
13973 
13974 void VmaBlockVector::UpdateHasEmptyBlock()
13975 {
13976  m_HasEmptyBlock = false;
13977  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13978  {
13979  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13980  if(pBlock->m_pMetadata->IsEmpty())
13981  {
13982  m_HasEmptyBlock = true;
13983  break;
13984  }
13985  }
13986 }
13987 
13988 #if VMA_STATS_STRING_ENABLED
13989 
13990 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13991 {
13992  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13993 
13994  json.BeginObject();
13995 
13996  if(IsCustomPool())
13997  {
13998  const char* poolName = m_hParentPool->GetName();
13999  if(poolName != VMA_NULL && poolName[0] != '\0')
14000  {
14001  json.WriteString("Name");
14002  json.WriteString(poolName);
14003  }
14004 
14005  json.WriteString("MemoryTypeIndex");
14006  json.WriteNumber(m_MemoryTypeIndex);
14007 
14008  json.WriteString("BlockSize");
14009  json.WriteNumber(m_PreferredBlockSize);
14010 
14011  json.WriteString("BlockCount");
14012  json.BeginObject(true);
14013  if(m_MinBlockCount > 0)
14014  {
14015  json.WriteString("Min");
14016  json.WriteNumber((uint64_t)m_MinBlockCount);
14017  }
14018  if(m_MaxBlockCount < SIZE_MAX)
14019  {
14020  json.WriteString("Max");
14021  json.WriteNumber((uint64_t)m_MaxBlockCount);
14022  }
14023  json.WriteString("Cur");
14024  json.WriteNumber((uint64_t)m_Blocks.size());
14025  json.EndObject();
14026 
14027  if(m_FrameInUseCount > 0)
14028  {
14029  json.WriteString("FrameInUseCount");
14030  json.WriteNumber(m_FrameInUseCount);
14031  }
14032 
14033  if(m_Algorithm != 0)
14034  {
14035  json.WriteString("Algorithm");
14036  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
14037  }
14038  }
14039  else
14040  {
14041  json.WriteString("PreferredBlockSize");
14042  json.WriteNumber(m_PreferredBlockSize);
14043  }
14044 
14045  json.WriteString("Blocks");
14046  json.BeginObject();
14047  for(size_t i = 0; i < m_Blocks.size(); ++i)
14048  {
14049  json.BeginString();
14050  json.ContinueString(m_Blocks[i]->GetId());
14051  json.EndString();
14052 
14053  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
14054  }
14055  json.EndObject();
14056 
14057  json.EndObject();
14058 }
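// Rough shape of the JSON emitted above for a custom pool (field values are
// illustrative only):
//
//   { "Name": "MyPool", "MemoryTypeIndex": 2, "BlockSize": 268435456,
//     "BlockCount": { "Min": 1, "Max": 16, "Cur": 3 },
//     "Blocks": { "0": { ... }, "1": { ... } } }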
14059 
14060 #endif // #if VMA_STATS_STRING_ENABLED
14061 
14062 void VmaBlockVector::Defragment(
14063  class VmaBlockVectorDefragmentationContext* pCtx,
14064  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
14065  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
14066  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
14067  VkCommandBuffer commandBuffer)
14068 {
14069  pCtx->res = VK_SUCCESS;
14070 
14071  const VkMemoryPropertyFlags memPropFlags =
14072  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
14073  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
14074 
14075  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
14076  isHostVisible;
14077  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
14078  !IsCorruptionDetectionEnabled() &&
14079  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
14080 
14081  // There are options to defragment this memory type.
14082  if(canDefragmentOnCpu || canDefragmentOnGpu)
14083  {
14084  bool defragmentOnGpu;
14085  // There is only one option to defragment this memory type.
14086  if(canDefragmentOnGpu != canDefragmentOnCpu)
14087  {
14088  defragmentOnGpu = canDefragmentOnGpu;
14089  }
14090  // Both options are available: Heuristics to choose the best one.
14091  else
14092  {
14093  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
14094  m_hAllocator->IsIntegratedGpu();
14095  }
14096 
14097  bool overlappingMoveSupported = !defragmentOnGpu;
14098 
14099  if(m_hAllocator->m_UseMutex)
14100  {
14101  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14102  {
14103  if(!m_Mutex.TryLockWrite())
14104  {
14105  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
14106  return;
14107  }
14108  }
14109  else
14110  {
14111  m_Mutex.LockWrite();
14112  pCtx->mutexLocked = true;
14113  }
14114  }
14115 
14116  pCtx->Begin(overlappingMoveSupported, flags);
14117 
14118  // Defragment.
14119 
14120  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
14121  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
14122  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
14123 
14124  // Accumulate statistics.
14125  if(pStats != VMA_NULL)
14126  {
14127  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
14128  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
14129  pStats->bytesMoved += bytesMoved;
14130  pStats->allocationsMoved += allocationsMoved;
14131  VMA_ASSERT(bytesMoved <= maxBytesToMove);
14132  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
14133  if(defragmentOnGpu)
14134  {
14135  maxGpuBytesToMove -= bytesMoved;
14136  maxGpuAllocationsToMove -= allocationsMoved;
14137  }
14138  else
14139  {
14140  maxCpuBytesToMove -= bytesMoved;
14141  maxCpuAllocationsToMove -= allocationsMoved;
14142  }
14143  }
14144 
14145  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14146  {
14147  if(m_hAllocator->m_UseMutex)
14148  m_Mutex.UnlockWrite();
14149 
14150  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
14151  pCtx->res = VK_NOT_READY;
14152 
14153  return;
14154  }
14155 
14156  if(pCtx->res >= VK_SUCCESS)
14157  {
14158  if(defragmentOnGpu)
14159  {
14160  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
14161  }
14162  else
14163  {
14164  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
14165  }
14166  }
14167  }
14168 }
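// Heuristic recap (as coded above): when both paths are possible, the GPU path
// is chosen for DEVICE_LOCAL memory and on integrated GPUs; otherwise the CPU
// path is used. Only the CPU path supports overlapping moves within a block
// (memmove semantics), which is why overlappingMoveSupported = !defragmentOnGpu.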
14169 
14170 void VmaBlockVector::DefragmentationEnd(
14171  class VmaBlockVectorDefragmentationContext* pCtx,
14172  uint32_t flags,
14173  VmaDefragmentationStats* pStats)
14174 {
14175  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
14176  {
14177  VMA_ASSERT(pCtx->mutexLocked == false);
14178 
14179  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
14180  // lock protecting us. Since we mutate state here, we have to take the lock now.
14181  m_Mutex.LockWrite();
14182  pCtx->mutexLocked = true;
14183  }
14184 
14185  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
14186  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
14187  {
14188  // Destroy buffers.
14189  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
14190  {
14191  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
14192  if(blockCtx.hBuffer)
14193  {
14194  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
14195  }
14196  }
14197 
14198  if(pCtx->res >= VK_SUCCESS)
14199  {
14200  FreeEmptyBlocks(pStats);
14201  }
14202  }
14203 
14204  if(pCtx->mutexLocked)
14205  {
14206  VMA_ASSERT(m_hAllocator->m_UseMutex);
14207  m_Mutex.UnlockWrite();
14208  }
14209 }
14210 
14211 uint32_t VmaBlockVector::ProcessDefragmentations(
14212  class VmaBlockVectorDefragmentationContext *pCtx,
14213  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
14214 {
14215  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14216 
14217  const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
14218 
14219  for(uint32_t i = 0; i < moveCount; ++ i)
14220  {
14221  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
14222 
14223  pMove->allocation = move.hAllocation;
14224  pMove->memory = move.pDstBlock->GetDeviceMemory();
14225  pMove->offset = move.dstOffset;
14226 
14227  ++ pMove;
14228  }
14229 
14230  pCtx->defragmentationMovesProcessed += moveCount;
14231 
14232  return moveCount;
14233 }
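// A sketch of how these moves reach the application through the public
// incremental API (vmaBeginDefragmentationPass / vmaEndDefragmentationPass,
// which call into DefragmentPassBegin/End below; maxMoves and myMovesArray are
// hypothetical caller-owned values, error handling omitted):
//
//   VmaDefragmentationPassInfo pass = { maxMoves, myMovesArray };
//   vmaBeginDefragmentationPass(allocator, ctx, &pass);  // updates pass.moveCount
//   for(uint32_t i = 0; i < pass.moveCount; ++i)
//   {
//       // Rebind/recreate the resource of pass.pMoves[i].allocation at
//       // pass.pMoves[i].memory + pass.pMoves[i].offset and copy its data.
//   }
//   vmaEndDefragmentationPass(allocator, ctx);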
14234 
14235 void VmaBlockVector::CommitDefragmentations(
14236  class VmaBlockVectorDefragmentationContext *pCtx,
14237  VmaDefragmentationStats* pStats)
14238 {
14239  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14240 
14241  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
14242  {
14243  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
14244 
14245  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
14246  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
14247  }
14248 
14249  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
14250  FreeEmptyBlocks(pStats);
14251 }
14252 
14253 size_t VmaBlockVector::CalcAllocationCount() const
14254 {
14255  size_t result = 0;
14256  for(size_t i = 0; i < m_Blocks.size(); ++i)
14257  {
14258  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
14259  }
14260  return result;
14261 }
14262 
14263 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
14264 {
14265  if(m_BufferImageGranularity == 1)
14266  {
14267  return false;
14268  }
14269  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
14270  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
14271  {
14272  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
14273  VMA_ASSERT(m_Algorithm == 0);
14274  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
14275  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
14276  {
14277  return true;
14278  }
14279  }
14280  return false;
14281 }
14282 
14283 void VmaBlockVector::MakePoolAllocationsLost(
14284  uint32_t currentFrameIndex,
14285  size_t* pLostAllocationCount)
14286 {
14287  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14288  size_t lostAllocationCount = 0;
14289  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14290  {
14291  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14292  VMA_ASSERT(pBlock);
14293  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
14294  }
14295  if(pLostAllocationCount != VMA_NULL)
14296  {
14297  *pLostAllocationCount = lostAllocationCount;
14298  }
14299 }
14300 
14301 VkResult VmaBlockVector::CheckCorruption()
14302 {
14303  if(!IsCorruptionDetectionEnabled())
14304  {
14305  return VK_ERROR_FEATURE_NOT_PRESENT;
14306  }
14307 
14308  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
14309  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14310  {
14311  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14312  VMA_ASSERT(pBlock);
14313  VkResult res = pBlock->CheckCorruption(m_hAllocator);
14314  if(res != VK_SUCCESS)
14315  {
14316  return res;
14317  }
14318  }
14319  return VK_SUCCESS;
14320 }
14321 
14322 void VmaBlockVector::AddStats(VmaStats* pStats)
14323 {
14324  const uint32_t memTypeIndex = m_MemoryTypeIndex;
14325  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
14326 
14327  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
14328 
14329  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14330  {
14331  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14332  VMA_ASSERT(pBlock);
14333  VMA_HEAVY_ASSERT(pBlock->Validate());
14334  VmaStatInfo allocationStatInfo;
14335  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
14336  VmaAddStatInfo(pStats->total, allocationStatInfo);
14337  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14338  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14339  }
14340 }
14341 
14342 ////////////////////////////////////////////////////////////////////////////////
14343 // VmaDefragmentationAlgorithm_Generic members definition
14344 
14345 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
14346  VmaAllocator hAllocator,
14347  VmaBlockVector* pBlockVector,
14348  uint32_t currentFrameIndex,
14349  bool overlappingMoveSupported) :
14350  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14351  m_AllocationCount(0),
14352  m_AllAllocations(false),
14353  m_BytesMoved(0),
14354  m_AllocationsMoved(0),
14355  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
14356 {
14357  // Create block info for each block.
14358  const size_t blockCount = m_pBlockVector->m_Blocks.size();
14359  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14360  {
14361  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
14362  pBlockInfo->m_OriginalBlockIndex = blockIndex;
14363  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
14364  m_Blocks.push_back(pBlockInfo);
14365  }
14366 
14367  // Sort them by m_pBlock pointer value.
14368  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
14369 }
14370 
14371 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
14372 {
14373  for(size_t i = m_Blocks.size(); i--; )
14374  {
14375  vma_delete(m_hAllocator, m_Blocks[i]);
14376  }
14377 }
14378 
14379 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14380 {
14381  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
14382  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
14383  {
14384  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
14385  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
14386  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
14387  {
14388  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
14389  (*it)->m_Allocations.push_back(allocInfo);
14390  }
14391  else
14392  {
14393  VMA_ASSERT(0);
14394  }
14395 
14396  ++m_AllocationCount;
14397  }
14398 }
14399 
14400 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
14401  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14402  VkDeviceSize maxBytesToMove,
14403  uint32_t maxAllocationsToMove,
14404  bool freeOldAllocations)
14405 {
14406  if(m_Blocks.empty())
14407  {
14408  return VK_SUCCESS;
14409  }
14410 
14411  // This is a choice based on research.
14412  // Option 1:
14413  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
14414  // Option 2:
14415  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
14416  // Option 3:
14417  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
14418 
14419  size_t srcBlockMinIndex = 0;
14420  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
14421  /*
14422  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
14423  {
14424  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
14425  if(blocksWithNonMovableCount > 0)
14426  {
14427  srcBlockMinIndex = blocksWithNonMovableCount - 1;
14428  }
14429  }
14430  */
14431 
14432  size_t srcBlockIndex = m_Blocks.size() - 1;
14433  size_t srcAllocIndex = SIZE_MAX;
14434  for(;;)
14435  {
14436  // 1. Find next allocation to move.
14437  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
14438  // 1.2. Then start from last to first m_Allocations.
14439  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
14440  {
14441  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
14442  {
14443  // Finished: no more allocations to process.
14444  if(srcBlockIndex == srcBlockMinIndex)
14445  {
14446  return VK_SUCCESS;
14447  }
14448  else
14449  {
14450  --srcBlockIndex;
14451  srcAllocIndex = SIZE_MAX;
14452  }
14453  }
14454  else
14455  {
14456  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
14457  }
14458  }
14459 
14460  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
14461  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
14462 
14463  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
14464  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
14465  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
14466  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
14467 
14468  // 2. Try to find new place for this allocation in preceding or current block.
14469  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
14470  {
14471  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
14472  VmaAllocationRequest dstAllocRequest;
14473  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
14474  m_CurrentFrameIndex,
14475  m_pBlockVector->GetFrameInUseCount(),
14476  m_pBlockVector->GetBufferImageGranularity(),
14477  size,
14478  alignment,
14479  false, // upperAddress
14480  suballocType,
14481  false, // canMakeOtherLost
14482  strategy,
14483  &dstAllocRequest) &&
14484  MoveMakesSense(
14485  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
14486  {
14487  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
14488 
14489  // Reached limit on number of allocations or bytes to move.
14490  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
14491  (m_BytesMoved + size > maxBytesToMove))
14492  {
14493  return VK_SUCCESS;
14494  }
14495 
14496  VmaDefragmentationMove move = {};
14497  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
14498  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
14499  move.srcOffset = srcOffset;
14500  move.dstOffset = dstAllocRequest.offset;
14501  move.size = size;
14502  move.hAllocation = allocInfo.m_hAllocation;
14503  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
14504  move.pDstBlock = pDstBlockInfo->m_pBlock;
14505 
14506  moves.push_back(move);
14507 
14508  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
14509  dstAllocRequest,
14510  suballocType,
14511  size,
14512  allocInfo.m_hAllocation);
14513 
14514  if(freeOldAllocations)
14515  {
14516  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
14517  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
14518  }
14519 
14520  if(allocInfo.m_pChanged != VMA_NULL)
14521  {
14522  *allocInfo.m_pChanged = VK_TRUE;
14523  }
14524 
14525  ++m_AllocationsMoved;
14526  m_BytesMoved += size;
14527 
14528  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
14529 
14530  break;
14531  }
14532  }
14533 
14534  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
14535 
14536  if(srcAllocIndex > 0)
14537  {
14538  --srcAllocIndex;
14539  }
14540  else
14541  {
14542  if(srcBlockIndex > 0)
14543  {
14544  --srcBlockIndex;
14545  srcAllocIndex = SIZE_MAX;
14546  }
14547  else
14548  {
14549  return VK_SUCCESS;
14550  }
14551  }
14552  }
14553 }
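// DefragmentRound scans sources from the back (last block, last allocation,
// following the sort order set up in Defragment() below) and tries
// destinations from the front, so every accepted move shifts an allocation
// from the "most source" end toward the "most destination" end; the round
// returns once the byte/allocation budgets are exhausted or the two ends meet.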
14554 
14555 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
14556 {
14557  size_t result = 0;
14558  for(size_t i = 0; i < m_Blocks.size(); ++i)
14559  {
14560  if(m_Blocks[i]->m_HasNonMovableAllocations)
14561  {
14562  ++result;
14563  }
14564  }
14565  return result;
14566 }
14567 
14568 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
14569  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14570  VkDeviceSize maxBytesToMove,
14571  uint32_t maxAllocationsToMove,
14572  VmaDefragmentationFlags flags)
14573 {
14574  if(!m_AllAllocations && m_AllocationCount == 0)
14575  {
14576  return VK_SUCCESS;
14577  }
14578 
14579  const size_t blockCount = m_Blocks.size();
14580  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14581  {
14582  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
14583 
14584  if(m_AllAllocations)
14585  {
14586  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
14587  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
14588  it != pMetadata->m_Suballocations.end();
14589  ++it)
14590  {
14591  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
14592  {
14593  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
14594  pBlockInfo->m_Allocations.push_back(allocInfo);
14595  }
14596  }
14597  }
14598 
14599  pBlockInfo->CalcHasNonMovableAllocations();
14600 
14601  // This is a choice based on research.
14602  // Option 1:
14603  pBlockInfo->SortAllocationsByOffsetDescending();
14604  // Option 2:
14605  //pBlockInfo->SortAllocationsBySizeDescending();
14606  }
14607 
14608  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
14609  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14610 
14611  // This is a choice based on research.
14612  const uint32_t roundCount = 2;
14613 
14614  // Execute defragmentation rounds (the main part).
14615  VkResult result = VK_SUCCESS;
14616  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14617  {
14618  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14619  }
14620 
14621  return result;
14622 }
14623 
14624 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14625  size_t dstBlockIndex, VkDeviceSize dstOffset,
14626  size_t srcBlockIndex, VkDeviceSize srcOffset)
14627 {
14628  if(dstBlockIndex < srcBlockIndex)
14629  {
14630  return true;
14631  }
14632  if(dstBlockIndex > srcBlockIndex)
14633  {
14634  return false;
14635  }
14636  if(dstOffset < srcOffset)
14637  {
14638  return true;
14639  }
14640  return false;
14641 }
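// Equivalently: a move "makes sense" only if it strictly decreases the pair
// (blockIndex, offset) in lexicographic order - i.e. it packs the allocation
// toward the front of the more "destination"-like blocks.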
14642 
14643 ////////////////////////////////////////////////////////////////////////////////
14644 // VmaDefragmentationAlgorithm_Fast
14645 
14646 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14647  VmaAllocator hAllocator,
14648  VmaBlockVector* pBlockVector,
14649  uint32_t currentFrameIndex,
14650  bool overlappingMoveSupported) :
14651  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14652  m_OverlappingMoveSupported(overlappingMoveSupported),
14653  m_AllocationCount(0),
14654  m_AllAllocations(false),
14655  m_BytesMoved(0),
14656  m_AllocationsMoved(0),
14657  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14658 {
14659  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14660 
14661 }
14662 
14663 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14664 {
14665 }
14666 
14667 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14668  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14669  VkDeviceSize maxBytesToMove,
14670  uint32_t maxAllocationsToMove,
14671  VmaDefragmentationFlags flags)
14672 {
14673  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14674 
14675  const size_t blockCount = m_pBlockVector->GetBlockCount();
14676  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14677  {
14678  return VK_SUCCESS;
14679  }
14680 
14681  PreprocessMetadata();
14682 
14683  // Sort blocks ascending by free size: fullest first (most "destination"), emptiest last (most "source").
14684 
14685  m_BlockInfos.resize(blockCount);
14686  for(size_t i = 0; i < blockCount; ++i)
14687  {
14688  m_BlockInfos[i].origBlockIndex = i;
14689  }
14690 
14691  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14692  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14693  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14694  });
14695 
14696  // THE MAIN ALGORITHM
14697 
14698  FreeSpaceDatabase freeSpaceDb;
14699 
14700  size_t dstBlockInfoIndex = 0;
14701  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14702  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14703  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14704  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14705  VkDeviceSize dstOffset = 0;
14706 
14707  bool end = false;
14708  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14709  {
14710  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14711  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14712  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14713  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14714  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14715  {
14716  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14717  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14718  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14719  if(m_AllocationsMoved == maxAllocationsToMove ||
14720  m_BytesMoved + srcAllocSize > maxBytesToMove)
14721  {
14722  end = true;
14723  break;
14724  }
14725  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14726 
14727  VmaDefragmentationMove move = {};
14728  // Try to place it in one of free spaces from the database.
14729  size_t freeSpaceInfoIndex;
14730  VkDeviceSize dstAllocOffset;
14731  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14732  freeSpaceInfoIndex, dstAllocOffset))
14733  {
14734  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14735  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14736  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14737 
14738  // Same block
14739  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14740  {
14741  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14742 
14743  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14744 
14745  VmaSuballocation suballoc = *srcSuballocIt;
14746  suballoc.offset = dstAllocOffset;
14747  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14748  m_BytesMoved += srcAllocSize;
14749  ++m_AllocationsMoved;
14750 
14751  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14752  ++nextSuballocIt;
14753  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14754  srcSuballocIt = nextSuballocIt;
14755 
14756  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14757 
14758  move.srcBlockIndex = srcOrigBlockIndex;
14759  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14760  move.srcOffset = srcAllocOffset;
14761  move.dstOffset = dstAllocOffset;
14762  move.size = srcAllocSize;
14763 
14764  moves.push_back(move);
14765  }
14766  // Different block
14767  else
14768  {
14769  // MOVE OPTION 2: Move the allocation to a different block.
14770 
14771  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14772 
14773  VmaSuballocation suballoc = *srcSuballocIt;
14774  suballoc.offset = dstAllocOffset;
14775  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14776  m_BytesMoved += srcAllocSize;
14777  ++m_AllocationsMoved;
14778 
14779  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14780  ++nextSuballocIt;
14781  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14782  srcSuballocIt = nextSuballocIt;
14783 
14784  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14785 
14786  move.srcBlockIndex = srcOrigBlockIndex;
14787  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14788  move.srcOffset = srcAllocOffset;
14789  move.dstOffset = dstAllocOffset;
14790  move.size = srcAllocSize;
14791 
14792  moves.push_back(move);
14793  }
14794  }
14795  else
14796  {
14797  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14798 
14799  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
14800  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14801  dstAllocOffset + srcAllocSize > dstBlockSize)
14802  {
14803  // But before that, register remaining free space at the end of dst block.
14804  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14805 
14806  ++dstBlockInfoIndex;
14807  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14808  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14809  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14810  dstBlockSize = pDstMetadata->GetSize();
14811  dstOffset = 0;
14812  dstAllocOffset = 0;
14813  }
14814 
14815  // Same block
14816  if(dstBlockInfoIndex == srcBlockInfoIndex)
14817  {
14818  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14819 
14820  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14821 
14822  bool skipOver = overlap;
14823  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14824  {
14825  // If the destination and source regions overlap, skip the move if it would
14826  // shift the allocation by less than 1/64 of its size.
14827  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14828  }
14829 
14830  if(skipOver)
14831  {
14832  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14833 
14834  dstOffset = srcAllocOffset + srcAllocSize;
14835  ++srcSuballocIt;
14836  }
14837  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14838  else
14839  {
14840  srcSuballocIt->offset = dstAllocOffset;
14841  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14842  dstOffset = dstAllocOffset + srcAllocSize;
14843  m_BytesMoved += srcAllocSize;
14844  ++m_AllocationsMoved;
14845  ++srcSuballocIt;
14846 
14847  move.srcBlockIndex = srcOrigBlockIndex;
14848  move.dstBlockIndex = dstOrigBlockIndex;
14849  move.srcOffset = srcAllocOffset;
14850  move.dstOffset = dstAllocOffset;
14851  move.size = srcAllocSize;
14852 
14853  moves.push_back(move);
14854  }
14855  }
14856  // Different block
14857  else
14858  {
14859  // MOVE OPTION 2: Move the allocation to a different block.
14860 
14861  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14862  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14863 
14864  VmaSuballocation suballoc = *srcSuballocIt;
14865  suballoc.offset = dstAllocOffset;
14866  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14867  dstOffset = dstAllocOffset + srcAllocSize;
14868  m_BytesMoved += srcAllocSize;
14869  ++m_AllocationsMoved;
14870 
14871  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14872  ++nextSuballocIt;
14873  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14874  srcSuballocIt = nextSuballocIt;
14875 
14876  pDstMetadata->m_Suballocations.push_back(suballoc);
14877 
14878  move.srcBlockIndex = srcOrigBlockIndex;
14879  move.dstBlockIndex = dstOrigBlockIndex;
14880  move.srcOffset = srcAllocOffset;
14881  move.dstOffset = dstAllocOffset;
14882  move.size = srcAllocSize;
14883 
14884  moves.push_back(move);
14885  }
14886  }
14887  }
14888  }
14889 
14890  m_BlockInfos.clear();
14891 
14892  PostprocessMetadata();
14893 
14894  return VK_SUCCESS;
14895 }
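// Sketch of the fast algorithm: a single sweep over blocks ordered from most
// "destination" to most "source", sliding each suballocation down to the
// current write cursor (dstOffset). Gaps that cannot be consumed right away -
// e.g. when a move is skipped because it would shift an allocation by less
// than 1/64 of its size - are recorded in freeSpaceDb and offered to later,
// smaller allocations before falling through to the cursor path.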
14896 
14897 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14898 {
14899  const size_t blockCount = m_pBlockVector->GetBlockCount();
14900  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14901  {
14902  VmaBlockMetadata_Generic* const pMetadata =
14903  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14904  pMetadata->m_FreeCount = 0;
14905  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14906  pMetadata->m_FreeSuballocationsBySize.clear();
14907  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14908  it != pMetadata->m_Suballocations.end(); )
14909  {
14910  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14911  {
14912  VmaSuballocationList::iterator nextIt = it;
14913  ++nextIt;
14914  pMetadata->m_Suballocations.erase(it);
14915  it = nextIt;
14916  }
14917  else
14918  {
14919  ++it;
14920  }
14921  }
14922  }
14923 }
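// After preprocessing, each block's suballocation list holds only live
// allocations (free entries removed, m_FreeCount and the by-size index
// cleared); PostprocessMetadata() below reconstructs the free entries and
// m_FreeSuballocationsBySize from the final offsets once all moves are done.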
14924 
14925 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14926 {
14927  const size_t blockCount = m_pBlockVector->GetBlockCount();
14928  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14929  {
14930  VmaBlockMetadata_Generic* const pMetadata =
14931  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14932  const VkDeviceSize blockSize = pMetadata->GetSize();
14933 
14934  // No allocations in this block - entire area is free.
14935  if(pMetadata->m_Suballocations.empty())
14936  {
14937  pMetadata->m_FreeCount = 1;
14938  //pMetadata->m_SumFreeSize is already set to blockSize.
14939  VmaSuballocation suballoc = {
14940  0, // offset
14941  blockSize, // size
14942  VMA_NULL, // hAllocation
14943  VMA_SUBALLOCATION_TYPE_FREE };
14944  pMetadata->m_Suballocations.push_back(suballoc);
14945  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14946  }
14947  // There are some allocations in this block.
14948  else
14949  {
14950  VkDeviceSize offset = 0;
14951  VmaSuballocationList::iterator it;
14952  for(it = pMetadata->m_Suballocations.begin();
14953  it != pMetadata->m_Suballocations.end();
14954  ++it)
14955  {
14956  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14957  VMA_ASSERT(it->offset >= offset);
14958 
14959  // Need to insert preceding free space.
14960  if(it->offset > offset)
14961  {
14962  ++pMetadata->m_FreeCount;
14963  const VkDeviceSize freeSize = it->offset - offset;
14964  VmaSuballocation suballoc = {
14965  offset, // offset
14966  freeSize, // size
14967  VMA_NULL, // hAllocation
14968  VMA_SUBALLOCATION_TYPE_FREE };
14969  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14970  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14971  {
14972  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14973  }
14974  }
14975 
14976  pMetadata->m_SumFreeSize -= it->size;
14977  offset = it->offset + it->size;
14978  }
14979 
14980  // Need to insert trailing free space.
14981  if(offset < blockSize)
14982  {
14983  ++pMetadata->m_FreeCount;
14984  const VkDeviceSize freeSize = blockSize - offset;
14985  VmaSuballocation suballoc = {
14986  offset, // offset
14987  freeSize, // size
14988  VMA_NULL, // hAllocation
14989  VMA_SUBALLOCATION_TYPE_FREE };
14990  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14991  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14992  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14993  {
14994  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14995  }
14996  }
14997 
14998  VMA_SORT(
14999  pMetadata->m_FreeSuballocationsBySize.begin(),
15000  pMetadata->m_FreeSuballocationsBySize.end(),
15001  VmaSuballocationItemSizeLess());
15002  }
15003 
15004  VMA_HEAVY_ASSERT(pMetadata->Validate());
15005  }
15006 }
15007 
15008 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
15009 {
15010  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
15011  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
15012  while(it != pMetadata->m_Suballocations.end())
15013  {
15014  if(it->offset < suballoc.offset)
15015  ++it;
15016  else
15017  break; // Found the insertion point. Without this break the loop would never terminate.
15018  }
15019  pMetadata->m_Suballocations.insert(it, suballoc);
15020 }
15021 
15022 ////////////////////////////////////////////////////////////////////////////////
15023 // VmaBlockVectorDefragmentationContext
15024 
15025 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
15026  VmaAllocator hAllocator,
15027  VmaPool hCustomPool,
15028  VmaBlockVector* pBlockVector,
15029  uint32_t currFrameIndex) :
15030  res(VK_SUCCESS),
15031  mutexLocked(false),
15032  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
15033  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
15034  defragmentationMovesProcessed(0),
15035  defragmentationMovesCommitted(0),
15036  hasDefragmentationPlan(0),
15037  m_hAllocator(hAllocator),
15038  m_hCustomPool(hCustomPool),
15039  m_pBlockVector(pBlockVector),
15040  m_CurrFrameIndex(currFrameIndex),
15041  m_pAlgorithm(VMA_NULL),
15042  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
15043  m_AllAllocations(false)
15044 {
15045 }
15046 
15047 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
15048 {
15049  vma_delete(m_hAllocator, m_pAlgorithm);
15050 }
15051 
15052 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
15053 {
15054  AllocInfo info = { hAlloc, pChanged };
15055  m_Allocations.push_back(info);
15056 }
15057 
15058 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
15059 {
15060  const bool allAllocations = m_AllAllocations ||
15061  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
15062 
15063  /********************************
15064  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
15065  ********************************/
15066 
15067  /*
15068  Fast algorithm is supported only when certain criteria are met:
15069  - VMA_DEBUG_MARGIN is 0.
15070  - All allocations in this block vector are moveable.
15071  - There is no possibility of image/buffer granularity conflict.
15072  - The defragmentation is not incremental
15073  */
15074  if(VMA_DEBUG_MARGIN == 0 &&
15075  allAllocations &&
15076  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
15077  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
15078  {
15079  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
15080  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
15081  }
15082  else
15083  {
15084  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
15085  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
15086  }
15087 
15088  if(allAllocations)
15089  {
15090  m_pAlgorithm->AddAll();
15091  }
15092  else
15093  {
15094  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
15095  {
15096  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
15097  }
15098  }
15099 }
15100 
15101 ////////////////////////////////////////////////////////////////////////////////
15102 // VmaDefragmentationContext
15103 
15104 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
15105  VmaAllocator hAllocator,
15106  uint32_t currFrameIndex,
15107  uint32_t flags,
15108  VmaDefragmentationStats* pStats) :
15109  m_hAllocator(hAllocator),
15110  m_CurrFrameIndex(currFrameIndex),
15111  m_Flags(flags),
15112  m_pStats(pStats),
15113  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
15114 {
15115  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
15116 }
15117 
15118 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
15119 {
15120  for(size_t i = m_CustomPoolContexts.size(); i--; )
15121  {
15122  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
15123  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
15124  vma_delete(m_hAllocator, pBlockVectorCtx);
15125  }
15126  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
15127  {
15128  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
15129  if(pBlockVectorCtx)
15130  {
15131  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
15132  vma_delete(m_hAllocator, pBlockVectorCtx);
15133  }
15134  }
15135 }
15136 
15137 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
15138 {
15139  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15140  {
15141  VmaPool pool = pPools[poolIndex];
15142  VMA_ASSERT(pool);
15143  // Pools with algorithm other than default are not defragmented.
15144  if(pool->m_BlockVector.GetAlgorithm() == 0)
15145  {
15146  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
15147 
15148  for(size_t i = m_CustomPoolContexts.size(); i--; )
15149  {
15150  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
15151  {
15152  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
15153  break;
15154  }
15155  }
15156 
15157  if(!pBlockVectorDefragCtx)
15158  {
15159  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15160  m_hAllocator,
15161  pool,
15162  &pool->m_BlockVector,
15163  m_CurrFrameIndex);
15164  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
15165  }
15166 
15167  pBlockVectorDefragCtx->AddAll();
15168  }
15169  }
15170 }
15171 
15172 void VmaDefragmentationContext_T::AddAllocations(
15173  uint32_t allocationCount,
15174  const VmaAllocation* pAllocations,
15175  VkBool32* pAllocationsChanged)
15176 {
15177  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
15178  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15179  {
15180  const VmaAllocation hAlloc = pAllocations[allocIndex];
15181  VMA_ASSERT(hAlloc);
15182  // DedicatedAlloc cannot be defragmented.
15183  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
15184  // Lost allocation cannot be defragmented.
15185  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
15186  {
15187  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
15188 
15189  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
15190  // This allocation belongs to custom pool.
15191  if(hAllocPool != VK_NULL_HANDLE)
15192  {
15193  // Pools with algorithm other than default are not defragmented.
15194  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
15195  {
15196  for(size_t i = m_CustomPoolContexts.size(); i--; )
15197  {
15198  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
15199  {
15200  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
15201  break;
15202  }
15203  }
15204  if(!pBlockVectorDefragCtx)
15205  {
15206  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15207  m_hAllocator,
15208  hAllocPool,
15209  &hAllocPool->m_BlockVector,
15210  m_CurrFrameIndex);
15211  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
15212  }
15213  }
15214  }
15215  // This allocation belongs to default pool.
15216  else
15217  {
15218  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
15219  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
15220  if(!pBlockVectorDefragCtx)
15221  {
15222  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15223  m_hAllocator,
15224  VMA_NULL, // hCustomPool
15225  m_hAllocator->m_pBlockVectors[memTypeIndex],
15226  m_CurrFrameIndex);
15227  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
15228  }
15229  }
15230 
15231  if(pBlockVectorDefragCtx)
15232  {
15233  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
15234  &pAllocationsChanged[allocIndex] : VMA_NULL;
15235  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
15236  }
15237  }
15238  }
15239 }
15240 
15241 VkResult VmaDefragmentationContext_T::Defragment(
15242  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
15243  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
15244  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
15245 {
15246  if(pStats)
15247  {
15248  memset(pStats, 0, sizeof(VmaDefragmentationStats));
15249  }
15250 
15251  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
15252  {
15253  // For incremental defragmentations, we just earmark how much we can move.
15254  // The real work happens in the defragmentation passes.
15255  m_MaxCpuBytesToMove = maxCpuBytesToMove;
15256  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
15257 
15258  m_MaxGpuBytesToMove = maxGpuBytesToMove;
15259  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
15260 
15261  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
15262  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
15263  return VK_SUCCESS;
15264 
15265  return VK_NOT_READY;
15266  }
15267 
15268  if(commandBuffer == VK_NULL_HANDLE)
15269  {
15270  maxGpuBytesToMove = 0;
15271  maxGpuAllocationsToMove = 0;
15272  }
15273 
15274  VkResult res = VK_SUCCESS;
15275 
15276  // Process default pools.
15277  for(uint32_t memTypeIndex = 0;
15278  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
15279  ++memTypeIndex)
15280  {
15281  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15282  if(pBlockVectorCtx)
15283  {
15284  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15285  pBlockVectorCtx->GetBlockVector()->Defragment(
15286  pBlockVectorCtx,
15287  pStats, flags,
15288  maxCpuBytesToMove, maxCpuAllocationsToMove,
15289  maxGpuBytesToMove, maxGpuAllocationsToMove,
15290  commandBuffer);
15291  if(pBlockVectorCtx->res != VK_SUCCESS)
15292  {
15293  res = pBlockVectorCtx->res;
15294  }
15295  }
15296  }
15297 
15298  // Process custom pools.
15299  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15300  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
15301  ++customCtxIndex)
15302  {
15303  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15304  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15305  pBlockVectorCtx->GetBlockVector()->Defragment(
15306  pBlockVectorCtx,
15307  pStats, flags,
15308  maxCpuBytesToMove, maxCpuAllocationsToMove,
15309  maxGpuBytesToMove, maxGpuAllocationsToMove,
15310  commandBuffer);
15311  if(pBlockVectorCtx->res != VK_SUCCESS)
15312  {
15313  res = pBlockVectorCtx->res;
15314  }
15315  }
15316 
15317  return res;
15318 }
15319 
15320 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
15321 {
15322  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
15323  uint32_t movesLeft = pInfo->moveCount;
15324 
15325  // Process default pools.
15326  for(uint32_t memTypeIndex = 0;
15327  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15328  ++memTypeIndex)
15329  {
15330  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15331  if(pBlockVectorCtx)
15332  {
15333  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15334 
15335  if(!pBlockVectorCtx->hasDefragmentationPlan)
15336  {
15337  pBlockVectorCtx->GetBlockVector()->Defragment(
15338  pBlockVectorCtx,
15339  m_pStats, m_Flags,
15340  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15341  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15342  VK_NULL_HANDLE);
15343 
15344  if(pBlockVectorCtx->res < VK_SUCCESS)
15345  continue;
15346 
15347  pBlockVectorCtx->hasDefragmentationPlan = true;
15348  }
15349 
15350  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15351  pBlockVectorCtx,
15352  pCurrentMove, movesLeft);
15353 
15354  movesLeft -= processed;
15355  pCurrentMove += processed;
15356  }
15357  }
15358 
15359  // Process custom pools.
15360  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15361  customCtxIndex < customCtxCount;
15362  ++customCtxIndex)
15363  {
15364  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15365  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15366 
15367  if(!pBlockVectorCtx->hasDefragmentationPlan)
15368  {
15369  pBlockVectorCtx->GetBlockVector()->Defragment(
15370  pBlockVectorCtx,
15371  m_pStats, m_Flags,
15372  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15373  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15374  VK_NULL_HANDLE);
15375 
15376  if(pBlockVectorCtx->res < VK_SUCCESS)
15377  continue;
15378 
15379  pBlockVectorCtx->hasDefragmentationPlan = true;
15380  }
15381 
15382  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15383  pBlockVectorCtx,
15384  pCurrentMove, movesLeft);
15385 
15386  movesLeft -= processed;
15387  pCurrentMove += processed;
15388  }
15389 
15390  pInfo->moveCount = pInfo->moveCount - movesLeft;
15391 
15392  return VK_SUCCESS;
15393 }
15394 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
15395 {
15396  VkResult res = VK_SUCCESS;
15397 
15398  // Process default pools.
15399  for(uint32_t memTypeIndex = 0;
15400  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15401  ++memTypeIndex)
15402  {
15403  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15404  if(pBlockVectorCtx)
15405  {
15406  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15407 
15408  if(!pBlockVectorCtx->hasDefragmentationPlan)
15409  {
15410  res = VK_NOT_READY;
15411  continue;
15412  }
15413 
15414  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15415  pBlockVectorCtx, m_pStats);
15416 
15417  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15418  res = VK_NOT_READY;
15419  }
15420  }
15421 
15422  // Process custom pools.
15423  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15424  customCtxIndex < customCtxCount;
15425  ++customCtxIndex)
15426  {
15427  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15428  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15429 
15430  if(!pBlockVectorCtx->hasDefragmentationPlan)
15431  {
15432  res = VK_NOT_READY;
15433  continue;
15434  }
15435 
15436  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15437  pBlockVectorCtx, m_pStats);
15438 
15439  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15440  res = VK_NOT_READY;
15441  }
15442 
15443  return res;
15444 }
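
/*
Editor's note: DefragmentPassBegin() and DefragmentPassEnd() above back the
incremental defragmentation API. A minimal caller loop might look like the
sketch below (assuming the application copies the allocations' contents itself
between the begin/end calls; `allocator`, `defragInfo`, and the copy step are
placeholders):

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);

    VmaDefragmentationPassMoveInfo moves[64];
    for(;;)
    {
        VmaDefragmentationPassInfo passInfo = {};
        passInfo.moveCount = 64;
        passInfo.pMoves = moves;
        vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
        if(passInfo.moveCount == 0)
            break; // No moves produced: the plan is exhausted.
        // ... copy each moved allocation's data to moves[i].memory at moves[i].offset ...
        if(vmaEndDefragmentationPass(allocator, defragCtx) == VK_SUCCESS)
            break; // All planned moves committed.
    }
    vmaDefragmentationEnd(allocator, defragCtx);
*/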
15445 
15446 ////////////////////////////////////////////////////////////////////////////////
15447 // VmaRecorder
15448 
15449 #if VMA_RECORDING_ENABLED
15450 
15451 VmaRecorder::VmaRecorder() :
15452  m_UseMutex(true),
15453  m_Flags(0),
15454  m_File(VMA_NULL),
15455  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
15456 {
15457 }
15458 
15459 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
15460 {
15461  m_UseMutex = useMutex;
15462  m_Flags = settings.flags;
15463 
15464 #if defined(_WIN32)
15465  // Open file for writing.
15466  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
15467 
15468  if(err != 0)
15469  {
15470  return VK_ERROR_INITIALIZATION_FAILED;
15471  }
15472 #else
15473  // Open file for writing.
15474  m_File = fopen(settings.pFilePath, "wb");
15475 
15476  if(m_File == 0)
15477  {
15478  return VK_ERROR_INITIALIZATION_FAILED;
15479  }
15480 #endif
15481 
15482  // Write header.
15483  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
15484  fprintf(m_File, "%s\n", "1,8");
15485 
15486  return VK_SUCCESS;
15487 }
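
/*
Editor's note: a sketch of how this recorder is enabled from the public API,
assuming VMA_RECORDING_ENABLED is defined to 1 before including this header
(the file path below is a placeholder):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush after every call, useful if the app may crash
    recordSettings.pFilePath = "vma_calls.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... fill physicalDevice, device, instance, etc. ...
    allocatorInfo.pRecordSettings = &recordSettings;
    // vmaCreateAllocator() then routes through VmaRecorder::Init() above.
*/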
15488 
15489 VmaRecorder::~VmaRecorder()
15490 {
15491  if(m_File != VMA_NULL)
15492  {
15493  fclose(m_File);
15494  }
15495 }
15496 
15497 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
15498 {
15499  CallParams callParams;
15500  GetBasicParams(callParams);
15501 
15502  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15503  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
15504  Flush();
15505 }
15506 
15507 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
15508 {
15509  CallParams callParams;
15510  GetBasicParams(callParams);
15511 
15512  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15513  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
15514  Flush();
15515 }
15516 
15517 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
15518 {
15519  CallParams callParams;
15520  GetBasicParams(callParams);
15521 
15522  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15523  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
15524  createInfo.memoryTypeIndex,
15525  createInfo.flags,
15526  createInfo.blockSize,
15527  (uint64_t)createInfo.minBlockCount,
15528  (uint64_t)createInfo.maxBlockCount,
15529  createInfo.frameInUseCount,
15530  pool);
15531  Flush();
15532 }
15533 
15534 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
15535 {
15536  CallParams callParams;
15537  GetBasicParams(callParams);
15538 
15539  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15540  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
15541  pool);
15542  Flush();
15543 }
15544 
15545 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
15546  const VkMemoryRequirements& vkMemReq,
15547  const VmaAllocationCreateInfo& createInfo,
15548  VmaAllocation allocation)
15549 {
15550  CallParams callParams;
15551  GetBasicParams(callParams);
15552 
15553  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15554  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15555  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15556  vkMemReq.size,
15557  vkMemReq.alignment,
15558  vkMemReq.memoryTypeBits,
15559  createInfo.flags,
15560  createInfo.usage,
15561  createInfo.requiredFlags,
15562  createInfo.preferredFlags,
15563  createInfo.memoryTypeBits,
15564  createInfo.pool,
15565  allocation,
15566  userDataStr.GetString());
15567  Flush();
15568 }
15569 
15570 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
15571  const VkMemoryRequirements& vkMemReq,
15572  const VmaAllocationCreateInfo& createInfo,
15573  uint64_t allocationCount,
15574  const VmaAllocation* pAllocations)
15575 {
15576  CallParams callParams;
15577  GetBasicParams(callParams);
15578 
15579  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15580  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15581  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
15582  vkMemReq.size,
15583  vkMemReq.alignment,
15584  vkMemReq.memoryTypeBits,
15585  createInfo.flags,
15586  createInfo.usage,
15587  createInfo.requiredFlags,
15588  createInfo.preferredFlags,
15589  createInfo.memoryTypeBits,
15590  createInfo.pool);
15591  PrintPointerList(allocationCount, pAllocations);
15592  fprintf(m_File, ",%s\n", userDataStr.GetString());
15593  Flush();
15594 }
15595 
15596 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
15597  const VkMemoryRequirements& vkMemReq,
15598  bool requiresDedicatedAllocation,
15599  bool prefersDedicatedAllocation,
15600  const VmaAllocationCreateInfo& createInfo,
15601  VmaAllocation allocation)
15602 {
15603  CallParams callParams;
15604  GetBasicParams(callParams);
15605 
15606  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15607  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15608  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15609  vkMemReq.size,
15610  vkMemReq.alignment,
15611  vkMemReq.memoryTypeBits,
15612  requiresDedicatedAllocation ? 1 : 0,
15613  prefersDedicatedAllocation ? 1 : 0,
15614  createInfo.flags,
15615  createInfo.usage,
15616  createInfo.requiredFlags,
15617  createInfo.preferredFlags,
15618  createInfo.memoryTypeBits,
15619  createInfo.pool,
15620  allocation,
15621  userDataStr.GetString());
15622  Flush();
15623 }
15624 
15625 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15626  const VkMemoryRequirements& vkMemReq,
15627  bool requiresDedicatedAllocation,
15628  bool prefersDedicatedAllocation,
15629  const VmaAllocationCreateInfo& createInfo,
15630  VmaAllocation allocation)
15631 {
15632  CallParams callParams;
15633  GetBasicParams(callParams);
15634 
15635  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15636  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15637  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15638  vkMemReq.size,
15639  vkMemReq.alignment,
15640  vkMemReq.memoryTypeBits,
15641  requiresDedicatedAllocation ? 1 : 0,
15642  prefersDedicatedAllocation ? 1 : 0,
15643  createInfo.flags,
15644  createInfo.usage,
15645  createInfo.requiredFlags,
15646  createInfo.preferredFlags,
15647  createInfo.memoryTypeBits,
15648  createInfo.pool,
15649  allocation,
15650  userDataStr.GetString());
15651  Flush();
15652 }
15653 
15654 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15655  VmaAllocation allocation)
15656 {
15657  CallParams callParams;
15658  GetBasicParams(callParams);
15659 
15660  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15661  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15662  allocation);
15663  Flush();
15664 }
15665 
15666 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15667  uint64_t allocationCount,
15668  const VmaAllocation* pAllocations)
15669 {
15670  CallParams callParams;
15671  GetBasicParams(callParams);
15672 
15673  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15674  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15675  PrintPointerList(allocationCount, pAllocations);
15676  fprintf(m_File, "\n");
15677  Flush();
15678 }
15679 
15680 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15681  VmaAllocation allocation,
15682  const void* pUserData)
15683 {
15684  CallParams callParams;
15685  GetBasicParams(callParams);
15686 
15687  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15688  UserDataString userDataStr(
15689  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15690  pUserData);
15691  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15692  allocation,
15693  userDataStr.GetString());
15694  Flush();
15695 }
15696 
15697 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15698  VmaAllocation allocation)
15699 {
15700  CallParams callParams;
15701  GetBasicParams(callParams);
15702 
15703  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15704  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15705  allocation);
15706  Flush();
15707 }
15708 
15709 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15710  VmaAllocation allocation)
15711 {
15712  CallParams callParams;
15713  GetBasicParams(callParams);
15714 
15715  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15716  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15717  allocation);
15718  Flush();
15719 }
15720 
15721 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15722  VmaAllocation allocation)
15723 {
15724  CallParams callParams;
15725  GetBasicParams(callParams);
15726 
15727  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15728  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15729  allocation);
15730  Flush();
15731 }
15732 
15733 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15734  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15735 {
15736  CallParams callParams;
15737  GetBasicParams(callParams);
15738 
15739  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15740  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15741  allocation,
15742  offset,
15743  size);
15744  Flush();
15745 }
15746 
15747 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15748  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15749 {
15750  CallParams callParams;
15751  GetBasicParams(callParams);
15752 
15753  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15754  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15755  allocation,
15756  offset,
15757  size);
15758  Flush();
15759 }
15760 
15761 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15762  const VkBufferCreateInfo& bufCreateInfo,
15763  const VmaAllocationCreateInfo& allocCreateInfo,
15764  VmaAllocation allocation)
15765 {
15766  CallParams callParams;
15767  GetBasicParams(callParams);
15768 
15769  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15770  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15771  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15772  bufCreateInfo.flags,
15773  bufCreateInfo.size,
15774  bufCreateInfo.usage,
15775  bufCreateInfo.sharingMode,
15776  allocCreateInfo.flags,
15777  allocCreateInfo.usage,
15778  allocCreateInfo.requiredFlags,
15779  allocCreateInfo.preferredFlags,
15780  allocCreateInfo.memoryTypeBits,
15781  allocCreateInfo.pool,
15782  allocation,
15783  userDataStr.GetString());
15784  Flush();
15785 }
15786 
15787 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15788  const VkImageCreateInfo& imageCreateInfo,
15789  const VmaAllocationCreateInfo& allocCreateInfo,
15790  VmaAllocation allocation)
15791 {
15792  CallParams callParams;
15793  GetBasicParams(callParams);
15794 
15795  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15796  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15797  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15798  imageCreateInfo.flags,
15799  imageCreateInfo.imageType,
15800  imageCreateInfo.format,
15801  imageCreateInfo.extent.width,
15802  imageCreateInfo.extent.height,
15803  imageCreateInfo.extent.depth,
15804  imageCreateInfo.mipLevels,
15805  imageCreateInfo.arrayLayers,
15806  imageCreateInfo.samples,
15807  imageCreateInfo.tiling,
15808  imageCreateInfo.usage,
15809  imageCreateInfo.sharingMode,
15810  imageCreateInfo.initialLayout,
15811  allocCreateInfo.flags,
15812  allocCreateInfo.usage,
15813  allocCreateInfo.requiredFlags,
15814  allocCreateInfo.preferredFlags,
15815  allocCreateInfo.memoryTypeBits,
15816  allocCreateInfo.pool,
15817  allocation,
15818  userDataStr.GetString());
15819  Flush();
15820 }
15821 
15822 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15823  VmaAllocation allocation)
15824 {
15825  CallParams callParams;
15826  GetBasicParams(callParams);
15827 
15828  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15829  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15830  allocation);
15831  Flush();
15832 }
15833 
15834 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15835  VmaAllocation allocation)
15836 {
15837  CallParams callParams;
15838  GetBasicParams(callParams);
15839 
15840  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15841  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15842  allocation);
15843  Flush();
15844 }
15845 
15846 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15847  VmaAllocation allocation)
15848 {
15849  CallParams callParams;
15850  GetBasicParams(callParams);
15851 
15852  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15853  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15854  allocation);
15855  Flush();
15856 }
15857 
15858 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15859  VmaAllocation allocation)
15860 {
15861  CallParams callParams;
15862  GetBasicParams(callParams);
15863 
15864  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15865  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15866  allocation);
15867  Flush();
15868 }
15869 
15870 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15871  VmaPool pool)
15872 {
15873  CallParams callParams;
15874  GetBasicParams(callParams);
15875 
15876  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15877  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15878  pool);
15879  Flush();
15880 }
15881 
15882 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15883  const VmaDefragmentationInfo2& info,
15884  VmaDefragmentationContext ctx)
15885 {
15886  CallParams callParams;
15887  GetBasicParams(callParams);
15888 
15889  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15890  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15891  info.flags);
15892  PrintPointerList(info.allocationCount, info.pAllocations);
15893  fprintf(m_File, ",");
15894  PrintPointerList(info.poolCount, info.pPools);
15895  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15896  info.maxCpuBytesToMove,
15897  info.maxCpuAllocationsToMove,
15898  info.maxGpuBytesToMove,
15899  info.maxGpuAllocationsToMove,
15900  info.commandBuffer,
15901  ctx);
15902  Flush();
15903 }
15904 
15905 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15906  VmaDefragmentationContext ctx)
15907 {
15908  CallParams callParams;
15909  GetBasicParams(callParams);
15910 
15911  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15912  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15913  ctx);
15914  Flush();
15915 }
15916 
15917 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15918  VmaPool pool,
15919  const char* name)
15920 {
15921  CallParams callParams;
15922  GetBasicParams(callParams);
15923 
15924  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15925  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15926  pool, name != VMA_NULL ? name : "");
15927  Flush();
15928 }
15929 
15930 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15931 {
15932  if(pUserData != VMA_NULL)
15933  {
15934  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15935  {
15936  m_Str = (const char*)pUserData;
15937  }
15938  else
15939  {
15940  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, pUserData is an opaque pointer: format its address as a string and store that instead.
15941  snprintf(m_PtrStr, 17, "%p", pUserData);
15942  m_Str = m_PtrStr;
15943  }
15944  }
15945  else
15946  {
15947  m_Str = "";
15948  }
15949 }
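
/*
Editor's note: the branch above means the recorded value of pUserData depends
on VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT. A sketch:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
    allocCreateInfo.pUserData = (void*)"MyRenderTarget"; // recorded as the string itself
    // Without the flag, only the pointer value (e.g. "0x7f3a...") is recorded.
*/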
15950 
15951 void VmaRecorder::WriteConfiguration(
15952  const VkPhysicalDeviceProperties& devProps,
15953  const VkPhysicalDeviceMemoryProperties& memProps,
15954  uint32_t vulkanApiVersion,
15955  bool dedicatedAllocationExtensionEnabled,
15956  bool bindMemory2ExtensionEnabled,
15957  bool memoryBudgetExtensionEnabled,
15958  bool deviceCoherentMemoryExtensionEnabled)
15959 {
15960  fprintf(m_File, "Config,Begin\n");
15961 
15962  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15963 
15964  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15965  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15966  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15967  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15968  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15969  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15970 
15971  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15972  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15973  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15974 
15975  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15976  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15977  {
15978  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15979  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15980  }
15981  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15982  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15983  {
15984  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15985  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15986  }
15987 
15988  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15989  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15990  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15991  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15992 
15993  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15994  fprintf(m_File, "Macro,VMA_MIN_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_MIN_ALIGNMENT);
15995  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15996  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15997  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15998  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15999  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
16000  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
16001  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
16002 
16003  fprintf(m_File, "Config,End\n");
16004 }
16005 
16006 void VmaRecorder::GetBasicParams(CallParams& outParams)
16007 {
16008  #if defined(_WIN32)
16009  outParams.threadId = GetCurrentThreadId();
16010  #else
16011  // Use C++11 features to get thread id and convert it to uint32_t.
16012  // There is room for optimization since sstream is quite slow.
16013  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
16014  std::thread::id thread_id = std::this_thread::get_id();
16015  std::stringstream thread_id_to_string_converter;
16016  thread_id_to_string_converter << thread_id;
16017  std::string thread_id_as_string = thread_id_to_string_converter.str();
16018  // Use stoull: thread ids can exceed the range of int, which would make std::stoi throw std::out_of_range.
16019  outParams.threadId = static_cast<uint32_t>(std::stoull(thread_id_as_string));
16019  #endif
16020 
16021  auto current_time = std::chrono::high_resolution_clock::now();
16022 
16023  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
16024 }
16025 
16026 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
16027 {
16028  if(count)
16029  {
16030  fprintf(m_File, "%p", pItems[0]);
16031  for(uint64_t i = 1; i < count; ++i)
16032  {
16033  fprintf(m_File, " %p", pItems[i]);
16034  }
16035  }
16036 }
16037 
16038 void VmaRecorder::Flush()
16039 {
16040  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
16041  {
16042  fflush(m_File);
16043  }
16044 }
16045 
16046 #endif // #if VMA_RECORDING_ENABLED
16047 
16048 ////////////////////////////////////////////////////////////////////////////////
16049 // VmaAllocationObjectAllocator
16050 
16051 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
16052  m_Allocator(pAllocationCallbacks, 1024)
16053 {
16054 }
16055 
16056 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
16057 {
16058  VmaMutexLock mutexLock(m_Mutex);
16059  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
16060 }
16061 
16062 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
16063 {
16064  VmaMutexLock mutexLock(m_Mutex);
16065  m_Allocator.Free(hAlloc);
16066 }
16067 
16068 ////////////////////////////////////////////////////////////////////////////////
16069 // VmaAllocator_T
16070 
16071 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
16072  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
16073  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
16074  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
16075  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
16076  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
16077  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
16078  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
16079  m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
16080  m_hDevice(pCreateInfo->device),
16081  m_hInstance(pCreateInfo->instance),
16082  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
16083  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
16084  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
16085  m_AllocationObjectAllocator(&m_AllocationCallbacks),
16086  m_HeapSizeLimitMask(0),
16087  m_DeviceMemoryCount(0),
16088  m_PreferredLargeHeapBlockSize(0),
16089  m_PhysicalDevice(pCreateInfo->physicalDevice),
16090  m_CurrentFrameIndex(0),
16091  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
16092  m_NextPoolId(0),
16093  m_GlobalMemoryTypeBits(UINT32_MAX)
16094 #if VMA_RECORDING_ENABLED
16095  ,m_pRecorder(VMA_NULL)
16096 #endif
16097 {
16098  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16099  {
16100  m_UseKhrDedicatedAllocation = false;
16101  m_UseKhrBindMemory2 = false;
16102  }
16103 
16104  if(VMA_DEBUG_DETECT_CORRUPTION)
16105  {
16106  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
16107  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
16108  }
16109 
16110  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
16111 
16112  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
16113  {
16114 #if !(VMA_DEDICATED_ALLOCATION)
16115  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
16116  {
16117  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
16118  }
16119 #endif
16120 #if !(VMA_BIND_MEMORY2)
16121  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
16122  {
16123  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
16124  }
16125 #endif
16126  }
16127 #if !(VMA_MEMORY_BUDGET)
16128  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
16129  {
16130  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
16131  }
16132 #endif
16133 #if !(VMA_BUFFER_DEVICE_ADDRESS)
16134  if(m_UseKhrBufferDeviceAddress)
16135  {
16136  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
16137  }
16138 #endif
16139 #if VMA_VULKAN_VERSION < 1002000
16140  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
16141  {
16142  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
16143  }
16144 #endif
16145 #if VMA_VULKAN_VERSION < 1001000
16146  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16147  {
16148  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
16149  }
16150 #endif
16151 #if !(VMA_MEMORY_PRIORITY)
16152  if(m_UseExtMemoryPriority)
16153  {
16154  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
16155  }
16156 #endif
16157 
16158  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
16159  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
16160  memset(&m_MemProps, 0, sizeof(m_MemProps));
16161 
16162  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
16163  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
16164 
16165 #if VMA_EXTERNAL_MEMORY
16166  memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
16167 #endif // #if VMA_EXTERNAL_MEMORY
16168 
16169  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
16170  {
16171  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
16172  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
16173  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
16174  }
16175 
16176  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
16177 
16178  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
16179  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
16180 
16181  VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
16182  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
16183  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
16184  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
16185 
16186  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
16187  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
16188 
16189  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
16190 
16191 #if VMA_EXTERNAL_MEMORY
16192  if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
16193  {
16194  memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
16195  sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
16196  }
16197 #endif // #if VMA_EXTERNAL_MEMORY
16198 
16199  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
16200  {
16201  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16202  {
16203  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
16204  if(limit != VK_WHOLE_SIZE)
16205  {
16206  m_HeapSizeLimitMask |= 1u << heapIndex;
16207  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
16208  {
16209  m_MemProps.memoryHeaps[heapIndex].size = limit;
16210  }
16211  }
16212  }
16213  }
16214 
16215  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16216  {
16217  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
16218 
16219  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
16220  this,
16221  VK_NULL_HANDLE, // hParentPool
16222  memTypeIndex,
16223  preferredBlockSize,
16224  0,
16225  SIZE_MAX,
16226  GetBufferImageGranularity(),
16227  pCreateInfo->frameInUseCount,
16228  false, // explicitBlockSize
16229  false, // linearAlgorithm
16230  0.5f, // priority (0.5 is the default per Vulkan spec)
16231  GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
16232  VMA_NULL); // pMemoryAllocateNext
16233  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
16234  // because minBlockCount is 0.
16235  }
16236 }
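
/*
Editor's note: the constructor above runs inside vmaCreateAllocator(). A
minimal creation sketch, assuming valid `instance`, `physicalDevice`, and
`device` handles:

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1; // 0 defaults to Vulkan 1.0
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/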
16237 
16238 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
16239 {
16240  VkResult res = VK_SUCCESS;
16241 
16242  if(pCreateInfo->pRecordSettings != VMA_NULL &&
16243  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
16244  {
16245 #if VMA_RECORDING_ENABLED
16246  m_pRecorder = vma_new(this, VmaRecorder)();
16247  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
16248  if(res != VK_SUCCESS)
16249  {
16250  return res;
16251  }
16252  m_pRecorder->WriteConfiguration(
16253  m_PhysicalDeviceProperties,
16254  m_MemProps,
16255  m_VulkanApiVersion,
16256  m_UseKhrDedicatedAllocation,
16257  m_UseKhrBindMemory2,
16258  m_UseExtMemoryBudget,
16259  m_UseAmdDeviceCoherentMemory);
16260  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
16261 #else
16262  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
16263  return VK_ERROR_FEATURE_NOT_PRESENT;
16264 #endif
16265  }
16266 
16267 #if VMA_MEMORY_BUDGET
16268  if(m_UseExtMemoryBudget)
16269  {
16270  UpdateVulkanBudget();
16271  }
16272 #endif // #if VMA_MEMORY_BUDGET
16273 
16274  return res;
16275 }
16276 
16277 VmaAllocator_T::~VmaAllocator_T()
16278 {
16279 #if VMA_RECORDING_ENABLED
16280  if(m_pRecorder != VMA_NULL)
16281  {
16282  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
16283  vma_delete(this, m_pRecorder);
16284  }
16285 #endif
16286 
16287  VMA_ASSERT(m_Pools.IsEmpty());
16288 
16289  for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
16290  {
16291  if(!m_DedicatedAllocations[memTypeIndex].IsEmpty())
16292  {
16293  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
16294  }
16295 
16296  vma_delete(this, m_pBlockVectors[memTypeIndex]);
16297  }
16298 }
16299 
16300 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
16301 {
16302 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16303  ImportVulkanFunctions_Static();
16304 #endif
16305 
16306  if(pVulkanFunctions != VMA_NULL)
16307  {
16308  ImportVulkanFunctions_Custom(pVulkanFunctions);
16309  }
16310 
16311 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16312  ImportVulkanFunctions_Dynamic();
16313 #endif
16314 
16315  ValidateVulkanFunctions();
16316 }
16317 
16318 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16319 
16320 void VmaAllocator_T::ImportVulkanFunctions_Static()
16321 {
16322  // Vulkan 1.0
16323  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
16324  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
16325  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
16326  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
16327  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
16328  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
16329  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
16330  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
16331  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
16332  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
16333  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
16334  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
16335  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
16336  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
16337  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
16338  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
16339  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
16340 
16341  // Vulkan 1.1
16342 #if VMA_VULKAN_VERSION >= 1001000
16343  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16344  {
16345  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
16346  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
16347  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
16348  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
16349  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
16350  }
16351 #endif
16352 }
16353 
16354 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16355 
16356 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
16357 {
16358  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
16359 
16360 #define VMA_COPY_IF_NOT_NULL(funcName) \
16361  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
16362 
16363  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
16364  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
16365  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
16366  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
16367  VMA_COPY_IF_NOT_NULL(vkMapMemory);
16368  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
16369  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
16370  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
16371  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
16372  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
16373  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
16374  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
16375  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
16376  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
16377  VMA_COPY_IF_NOT_NULL(vkCreateImage);
16378  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
16379  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
16380 
16381 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16382  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
16383  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
16384 #endif
16385 
16386 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16387  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
16388  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
16389 #endif
16390 
16391 #if VMA_MEMORY_BUDGET
16392  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
16393 #endif
16394 
16395 #undef VMA_COPY_IF_NOT_NULL
16396 }
16397 
16398 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16399 
16400 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
16401 {
16402 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
16403  if(m_VulkanFunctions.memberName == VMA_NULL) \
16404  m_VulkanFunctions.memberName = \
16405  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
16406 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
16407  if(m_VulkanFunctions.memberName == VMA_NULL) \
16408  m_VulkanFunctions.memberName = \
16409  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
16410 
16411  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
16412  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
16413  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
16414  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
16415  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
16416  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
16417  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
16418  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
16419  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
16420  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
16421  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
16422  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
16423  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
16424  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
16425  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
16426  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
16427  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
16428 
16429 #if VMA_VULKAN_VERSION >= 1001000
16430  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16431  {
16432  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
16433  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
16434  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
16435  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
16436  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
16437  }
16438 #endif
16439 
16440 #if VMA_DEDICATED_ALLOCATION
16441  if(m_UseKhrDedicatedAllocation)
16442  {
16443  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
16444  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
16445  }
16446 #endif
16447 
16448 #if VMA_BIND_MEMORY2
16449  if(m_UseKhrBindMemory2)
16450  {
16451  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
16452  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
16453  }
16454 #endif // #if VMA_BIND_MEMORY2
16455 
16456 #if VMA_MEMORY_BUDGET
16457  if(m_UseExtMemoryBudget)
16458  {
16459  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
16460  }
16461 #endif // #if VMA_MEMORY_BUDGET
16462 
16463 #undef VMA_FETCH_DEVICE_FUNC
16464 #undef VMA_FETCH_INSTANCE_FUNC
16465 }
16466 
16467 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
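
/*
Editor's note: a sketch of the configuration that exercises the dynamic import
path above instead of the static one, defined before this header is included
with VMA_IMPLEMENTATION:

    #define VMA_STATIC_VULKAN_FUNCTIONS 0
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

ImportVulkanFunctions_Dynamic() then fetches every entry point not already
supplied via VmaVulkanFunctions, using the global vkGetInstanceProcAddr and
vkGetDeviceProcAddr.
*/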
16468 
16469 void VmaAllocator_T::ValidateVulkanFunctions()
16470 {
16471  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
16472  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
16473  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
16474  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
16475  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
16476  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
16477  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
16478  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
16479  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
16480  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
16481  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
16482  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
16483  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
16484  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
16485  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
16486  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
16487  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
16488 
16489 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16490  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
16491  {
16492  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
16493  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
16494  }
16495 #endif
16496 
16497 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16498  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
16499  {
16500  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
16501  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
16502  }
16503 #endif
16504 
16505 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
16506  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16507  {
16508  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
16509  }
16510 #endif
16511 }
16512 
16513 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
16514 {
16515  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16516  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16517  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
16518  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
16519 }
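
/*
Editor's note, worked example: with the default VMA_SMALL_HEAP_MAX_SIZE
(1 GiB) and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB):
- a 256 MiB heap counts as small, so the preferred block size is 256 MiB / 8 = 32 MiB;
- an 8 GiB heap counts as large, so the preferred block size is 256 MiB.
Both values are already 32-byte aligned, so VmaAlignUp leaves them unchanged.
*/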
16520 
16521 VkResult VmaAllocator_T::AllocateMemoryOfType(
16522  VkDeviceSize size,
16523  VkDeviceSize alignment,
16524  bool dedicatedAllocation,
16525  VkBuffer dedicatedBuffer,
16526  VkBufferUsageFlags dedicatedBufferUsage,
16527  VkImage dedicatedImage,
16528  const VmaAllocationCreateInfo& createInfo,
16529  uint32_t memTypeIndex,
16530  VmaSuballocationType suballocType,
16531  size_t allocationCount,
16532  VmaAllocation* pAllocations)
16533 {
16534  VMA_ASSERT(pAllocations != VMA_NULL);
16535  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
16536 
16537  VmaAllocationCreateInfo finalCreateInfo = createInfo;
16538 
16539  // If memory type is not HOST_VISIBLE, disable MAPPED.
16540  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16541  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16542  {
16543  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16544  }
16545  // If memory is lazily allocated, it should be always dedicated.
16546  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
16547  {
16548  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16549  }
16550 
16551  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
16552  VMA_ASSERT(blockVector);
16553 
16554  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
16555  bool preferDedicatedMemory =
16556  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
16557  dedicatedAllocation ||
16558  // Heuristic: allocate dedicated memory if the requested size is greater than half of the preferred block size.
16559  size > preferredBlockSize / 2;
16560 
16561  if(preferDedicatedMemory &&
16562  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
16563  finalCreateInfo.pool == VK_NULL_HANDLE)
16564  {
16565  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16566  }
16567 
16568  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
16569  {
16570  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16571  {
16572  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16573  }
16574  else
16575  {
16576  return AllocateDedicatedMemory(
16577  size,
16578  suballocType,
16579  memTypeIndex,
16580  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16581  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16582  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16583  finalCreateInfo.pUserData,
16584  finalCreateInfo.priority,
16585  dedicatedBuffer,
16586  dedicatedBufferUsage,
16587  dedicatedImage,
16588  allocationCount,
16589  pAllocations);
16590  }
16591  }
16592  else
16593  {
16594  VkResult res = blockVector->Allocate(
16595  m_CurrentFrameIndex.load(),
16596  size,
16597  alignment,
16598  finalCreateInfo,
16599  suballocType,
16600  allocationCount,
16601  pAllocations);
16602  if(res == VK_SUCCESS)
16603  {
16604  return res;
16605  }
16606 
16607  // Block vector allocation failed: try dedicated memory.
16608  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16609  {
16610  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16611  }
16612 
16613  // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
16614  // which can quickly deplete maxMemoryAllocationCount: Don't try dedicated allocations when above
16615  // 3/4 of the maximum allocation count.
16616  if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
16617  {
16618  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16619  }
16620 
16621  res = AllocateDedicatedMemory(
16622  size,
16623  suballocType,
16624  memTypeIndex,
16625  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16626  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16627  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16628  finalCreateInfo.pUserData,
16629  finalCreateInfo.priority,
16630  dedicatedBuffer,
16631  dedicatedBufferUsage,
16632  dedicatedImage,
16633  allocationCount,
16634  pAllocations);
16635  if(res == VK_SUCCESS)
16636  {
16637  // Succeeded: AllocateDedicatedMemory already filled pAllocations, nothing more to do here.
16638  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
16639  return VK_SUCCESS;
16640  }
16641  else
16642  {
16643  // Everything failed: Return error code.
16644  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16645  return res;
16646  }
16647  }
16648 }
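
/*
Editor's note: callers can force the dedicated path above explicitly. A sketch,
assuming a filled `imageCreateInfo` for a large render target:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkImage image;
    VmaAllocation allocation;
    vmaCreateImage(allocator, &imageCreateInfo, &allocCreateInfo, &image, &allocation, VMA_NULL);
*/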
16649 
16650 VkResult VmaAllocator_T::AllocateDedicatedMemory(
16651  VkDeviceSize size,
16652  VmaSuballocationType suballocType,
16653  uint32_t memTypeIndex,
16654  bool withinBudget,
16655  bool map,
16656  bool isUserDataString,
16657  void* pUserData,
16658  float priority,
16659  VkBuffer dedicatedBuffer,
16660  VkBufferUsageFlags dedicatedBufferUsage,
16661  VkImage dedicatedImage,
16662  size_t allocationCount,
16663  VmaAllocation* pAllocations)
16664 {
16665  VMA_ASSERT(allocationCount > 0 && pAllocations);
16666 
16667  if(withinBudget)
16668  {
16669  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16670  VmaBudget heapBudget = {};
16671  GetBudget(&heapBudget, heapIndex, 1);
16672  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16673  {
16674  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16675  }
16676  }
16677 
16678  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16679  allocInfo.memoryTypeIndex = memTypeIndex;
16680  allocInfo.allocationSize = size;
16681 
16682 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16683  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16684  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16685  {
16686  if(dedicatedBuffer != VK_NULL_HANDLE)
16687  {
16688  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16689  dedicatedAllocInfo.buffer = dedicatedBuffer;
16690  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16691  }
16692  else if(dedicatedImage != VK_NULL_HANDLE)
16693  {
16694  dedicatedAllocInfo.image = dedicatedImage;
16695  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16696  }
16697  }
16698 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16699 
16700 #if VMA_BUFFER_DEVICE_ADDRESS
16701  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16702  if(m_UseKhrBufferDeviceAddress)
16703  {
16704  bool canContainBufferWithDeviceAddress = true;
16705  if(dedicatedBuffer != VK_NULL_HANDLE)
16706  {
16707  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16708  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16709  }
16710  else if(dedicatedImage != VK_NULL_HANDLE)
16711  {
16712  canContainBufferWithDeviceAddress = false;
16713  }
16714  if(canContainBufferWithDeviceAddress)
16715  {
16716  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16717  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16718  }
16719  }
16720 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16721 
16722 #if VMA_MEMORY_PRIORITY
16723  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
16724  if(m_UseExtMemoryPriority)
16725  {
16726  priorityInfo.priority = priority;
16727  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
16728  }
16729 #endif // #if VMA_MEMORY_PRIORITY
16730 
16731 #if VMA_EXTERNAL_MEMORY
16732  // Attach VkExportMemoryAllocateInfoKHR if necessary.
16733  VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
16734  exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
16735  if(exportMemoryAllocInfo.handleTypes != 0)
16736  {
16737  VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
16738  }
16739 #endif // #if VMA_EXTERNAL_MEMORY
16740 
16741  size_t allocIndex;
16742  VkResult res = VK_SUCCESS;
16743  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16744  {
16745  res = AllocateDedicatedMemoryPage(
16746  size,
16747  suballocType,
16748  memTypeIndex,
16749  allocInfo,
16750  map,
16751  isUserDataString,
16752  pUserData,
16753  pAllocations + allocIndex);
16754  if(res != VK_SUCCESS)
16755  {
16756  break;
16757  }
16758  }
16759 
16760  if(res == VK_SUCCESS)
16761  {
16762  // Register them in m_DedicatedAllocations.
16763  {
16764  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16765  DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
16766  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16767  {
16768  dedicatedAllocations.PushBack(pAllocations[allocIndex]);
16769  }
16770  }
16771 
16772  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16773  }
16774  else
16775  {
16776  // Free all already created allocations.
16777  while(allocIndex--)
16778  {
16779  VmaAllocation currAlloc = pAllocations[allocIndex];
16780  VkDeviceMemory hMemory = currAlloc->GetMemory();
16781 
16782  /*
16783  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
16784  before vkFreeMemory.
16785 
16786  if(currAlloc->GetMappedData() != VMA_NULL)
16787  {
16788  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16789  }
16790  */
16791 
16792  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16793  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16794  currAlloc->SetUserData(this, VMA_NULL);
16795  m_AllocationObjectAllocator.Free(currAlloc);
16796  }
16797 
16798  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16799  }
16800 
16801  return res;
16802 }
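
/*
Editor's note: the withinBudget parameter above corresponds to
VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT. With it set, an allocation that would
push the heap past its current budget fails early with
VK_ERROR_OUT_OF_DEVICE_MEMORY instead of reaching vkAllocateMemory. Sketch:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT |
        VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT;
*/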
16803 
16804 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16805  VkDeviceSize size,
16806  VmaSuballocationType suballocType,
16807  uint32_t memTypeIndex,
16808  const VkMemoryAllocateInfo& allocInfo,
16809  bool map,
16810  bool isUserDataString,
16811  void* pUserData,
16812  VmaAllocation* pAllocation)
16813 {
16814  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16815  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16816  if(res < 0)
16817  {
16818  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16819  return res;
16820  }
16821 
16822  void* pMappedData = VMA_NULL;
16823  if(map)
16824  {
16825  res = (*m_VulkanFunctions.vkMapMemory)(
16826  m_hDevice,
16827  hMemory,
16828  0,
16829  VK_WHOLE_SIZE,
16830  0,
16831  &pMappedData);
16832  if(res < 0)
16833  {
16834  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16835  FreeVulkanMemory(memTypeIndex, size, hMemory);
16836  return res;
16837  }
16838  }
16839 
16840  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16841  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16842  (*pAllocation)->SetUserData(this, pUserData);
16843  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16844  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16845  {
16846  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16847  }
16848 
16849  return VK_SUCCESS;
16850 }
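// Illustrative sketch (not part of the library): a dedicated allocation like the
// one created above is requested through the public API with
// VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. Assumes a valid `allocator`;
// error handling omitted.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 64ull * 1024 * 1024;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

VkBuffer buf;
VmaAllocation alloc;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
*/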
16851 
16852 void VmaAllocator_T::GetBufferMemoryRequirements(
16853  VkBuffer hBuffer,
16854  VkMemoryRequirements& memReq,
16855  bool& requiresDedicatedAllocation,
16856  bool& prefersDedicatedAllocation) const
16857 {
16858 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16859  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16860  {
16861  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16862  memReqInfo.buffer = hBuffer;
16863 
16864  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16865 
16866  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16867  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16868 
16869  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16870 
16871  memReq = memReq2.memoryRequirements;
16872  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16873  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16874  }
16875  else
16876 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16877  {
16878  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16879  requiresDedicatedAllocation = false;
16880  prefersDedicatedAllocation = false;
16881  }
16882 }
16883 
16884 void VmaAllocator_T::GetImageMemoryRequirements(
16885  VkImage hImage,
16886  VkMemoryRequirements& memReq,
16887  bool& requiresDedicatedAllocation,
16888  bool& prefersDedicatedAllocation) const
16889 {
16890 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16891  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16892  {
16893  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16894  memReqInfo.image = hImage;
16895 
16896  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16897 
16898  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16899  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16900 
16901  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16902 
16903  memReq = memReq2.memoryRequirements;
16904  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16905  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16906  }
16907  else
16908 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16909  {
16910  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16911  requiresDedicatedAllocation = false;
16912  prefersDedicatedAllocation = false;
16913  }
16914 }
16915 
16916 VkResult VmaAllocator_T::AllocateMemory(
16917  const VkMemoryRequirements& vkMemReq,
16918  bool requiresDedicatedAllocation,
16919  bool prefersDedicatedAllocation,
16920  VkBuffer dedicatedBuffer,
16921  VkBufferUsageFlags dedicatedBufferUsage,
16922  VkImage dedicatedImage,
16923  const VmaAllocationCreateInfo& createInfo,
16924  VmaSuballocationType suballocType,
16925  size_t allocationCount,
16926  VmaAllocation* pAllocations)
16927 {
16928  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16929 
16930  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16931 
16932  if(vkMemReq.size == 0)
16933  {
16934  return VK_ERROR_VALIDATION_FAILED_EXT;
16935  }
16936  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16937  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16938  {
16939  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16940  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16941  }
16942  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16943  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16944  {
16945  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16946  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16947  }
16948  if(requiresDedicatedAllocation)
16949  {
16950  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16951  {
16952  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16953  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16954  }
16955  if(createInfo.pool != VK_NULL_HANDLE)
16956  {
16957  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16958  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16959  }
16960  }
16961  if((createInfo.pool != VK_NULL_HANDLE) &&
16962  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16963  {
16964  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16965  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16966  }
16967 
16968  if(createInfo.pool != VK_NULL_HANDLE)
16969  {
16970  VmaAllocationCreateInfo createInfoForPool = createInfo;
16971  // If memory type is not HOST_VISIBLE, disable MAPPED.
16972  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16973  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16974  {
16975  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16976  }
16977 
16978  return createInfo.pool->m_BlockVector.Allocate(
16979  m_CurrentFrameIndex.load(),
16980  vkMemReq.size,
16981  vkMemReq.alignment,
16982  createInfoForPool,
16983  suballocType,
16984  allocationCount,
16985  pAllocations);
16986  }
16987  else
16988  {
16989  // Bit mask of Vulkan memory types acceptable for this allocation.
16990  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16991  uint32_t memTypeIndex = UINT32_MAX;
16992  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16993  if(res == VK_SUCCESS)
16994  {
16995  res = AllocateMemoryOfType(
16996  vkMemReq.size,
16997  vkMemReq.alignment,
16998  requiresDedicatedAllocation || prefersDedicatedAllocation,
16999  dedicatedBuffer,
17000  dedicatedBufferUsage,
17001  dedicatedImage,
17002  createInfo,
17003  memTypeIndex,
17004  suballocType,
17005  allocationCount,
17006  pAllocations);
17007  // Succeeded on first try.
17008  if(res == VK_SUCCESS)
17009  {
17010  return res;
17011  }
17012  // Allocation from this memory type failed. Try other compatible memory types.
17013  else
17014  {
17015  for(;;)
17016  {
17017  // Remove old memTypeIndex from list of possibilities.
17018  memoryTypeBits &= ~(1u << memTypeIndex);
17019  // Find alternative memTypeIndex.
17020  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
17021  if(res == VK_SUCCESS)
17022  {
17023  res = AllocateMemoryOfType(
17024  vkMemReq.size,
17025  vkMemReq.alignment,
17026  requiresDedicatedAllocation || prefersDedicatedAllocation,
17027  dedicatedBuffer,
17028  dedicatedBufferUsage,
17029  dedicatedImage,
17030  createInfo,
17031  memTypeIndex,
17032  suballocType,
17033  allocationCount,
17034  pAllocations);
17035  // Allocation from this alternative memory type succeeded.
17036  if(res == VK_SUCCESS)
17037  {
17038  return res;
17039  }
17040  // else: Allocation from this memory type failed. Try next one - next loop iteration.
17041  }
17042  // No other matching memory type index could be found.
17043  else
17044  {
17045  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
17046  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17047  }
17048  }
17049  }
17050  }
17051  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
17052  else
17053  return res;
17054  }
17055 }
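// Illustrative sketch: the retry loop above is what lets a call like the
// following fall back to other compatible memory types when the first choice
// is exhausted. `device` and `buffer` are assumed valid; error handling omitted.
/*
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

VmaAllocation alloc;
vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, VMA_NULL);
vmaBindBufferMemory(allocator, alloc, buffer);
*/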
17056 
17057 void VmaAllocator_T::FreeMemory(
17058  size_t allocationCount,
17059  const VmaAllocation* pAllocations)
17060 {
17061  VMA_ASSERT(pAllocations);
17062 
17063  for(size_t allocIndex = allocationCount; allocIndex--; )
17064  {
17065  VmaAllocation allocation = pAllocations[allocIndex];
17066 
17067  if(allocation != VK_NULL_HANDLE)
17068  {
17069  if(TouchAllocation(allocation))
17070  {
17071  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
17072  {
17073  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
17074  }
17075 
17076  switch(allocation->GetType())
17077  {
17078  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17079  {
17080  VmaBlockVector* pBlockVector = VMA_NULL;
17081  VmaPool hPool = allocation->GetBlock()->GetParentPool();
17082  if(hPool != VK_NULL_HANDLE)
17083  {
17084  pBlockVector = &hPool->m_BlockVector;
17085  }
17086  else
17087  {
17088  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17089  pBlockVector = m_pBlockVectors[memTypeIndex];
17090  }
17091  pBlockVector->Free(allocation);
17092  }
17093  break;
17094  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17095  FreeDedicatedMemory(allocation);
17096  break;
17097  default:
17098  VMA_ASSERT(0);
17099  }
17100  }
17101 
17102  // Do this regardless of whether the allocation is lost. Lost allocations still count toward m_Budget.m_AllocationBytes.
17103  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
17104  allocation->SetUserData(this, VMA_NULL);
17105  m_AllocationObjectAllocator.Free(allocation);
17106  }
17107  }
17108 }
17109 
17110 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
17111 {
17112  // Initialize.
17113  InitStatInfo(pStats->total);
17114  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
17115  InitStatInfo(pStats->memoryType[i]);
17116  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
17117  InitStatInfo(pStats->memoryHeap[i]);
17118 
17119  // Process default pools.
17120  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17121  {
17122  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17123  VMA_ASSERT(pBlockVector);
17124  pBlockVector->AddStats(pStats);
17125  }
17126 
17127  // Process custom pools.
17128  {
17129  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17130  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
17131  {
17132  pool->m_BlockVector.AddStats(pStats);
17133  }
17134  }
17135 
17136  // Process dedicated allocations.
17137  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17138  {
17139  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
17140  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17141  DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
17142  for(VmaAllocation alloc = dedicatedAllocList.Front();
17143  alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
17144  {
17145  VmaStatInfo allocationStatInfo;
17146  alloc->DedicatedAllocCalcStatsInfo(allocationStatInfo);
17147  VmaAddStatInfo(pStats->total, allocationStatInfo);
17148  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
17149  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
17150  }
17151  }
17152 
17153  // Postprocess.
17154  VmaPostprocessCalcStatInfo(pStats->total);
17155  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
17156  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
17157  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
17158  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
17159 }
17160 
17161 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
17162 {
17163 #if VMA_MEMORY_BUDGET
17164  if(m_UseExtMemoryBudget)
17165  {
17166  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
17167  {
17168  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
17169  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
17170  {
17171  const uint32_t heapIndex = firstHeap + i;
17172 
17173  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
17174  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
17175 
17176  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
17177  {
17178  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
17179  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17180  }
17181  else
17182  {
17183  outBudget->usage = 0;
17184  }
17185 
17186  // Have to take the MIN with the heap size because an explicit HeapSizeLimit is included in it.
17187  outBudget->budget = VMA_MIN(
17188  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
17189  }
17190  }
17191  else
17192  {
17193  UpdateVulkanBudget(); // Outside of mutex lock
17194  GetBudget(outBudget, firstHeap, heapCount); // Recursion
17195  }
17196  }
17197  else
17198 #endif
17199  {
17200  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
17201  {
17202  const uint32_t heapIndex = firstHeap + i;
17203 
17204  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
17205  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
17206 
17207  outBudget->usage = outBudget->blockBytes;
17208  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17209  }
17210  }
17211 }
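// Illustrative sketch: checking the budget before a large allocation. The values
// are estimates, refreshed after every 30 allocator operations or on
// vmaSetCurrentFrameIndex() when VK_EXT_memory_budget is enabled, so treat the
// headroom as a hint. `requiredBytes` is a hypothetical size.
/*
VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetBudget(allocator, budgets);
const uint32_t heapIndex = 0; // heap of interest
if(budgets[heapIndex].usage + requiredBytes <= budgets[heapIndex].budget)
{
    // Likely enough headroom in this heap.
}
*/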
17212 
17213 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
17214 
17215 VkResult VmaAllocator_T::DefragmentationBegin(
17216  const VmaDefragmentationInfo2& info,
17217  VmaDefragmentationStats* pStats,
17218  VmaDefragmentationContext* pContext)
17219 {
17220  if(info.pAllocationsChanged != VMA_NULL)
17221  {
17222  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
17223  }
17224 
17225  *pContext = vma_new(this, VmaDefragmentationContext_T)(
17226  this, m_CurrentFrameIndex.load(), info.flags, pStats);
17227 
17228  (*pContext)->AddPools(info.poolCount, info.pPools);
17229  (*pContext)->AddAllocations(
17230  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
17231 
17232  VkResult res = (*pContext)->Defragment(
17233  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
17234  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
17235  info.commandBuffer, pStats, info.flags);
17236 
17237  if(res != VK_NOT_READY)
17238  {
17239  vma_delete(this, *pContext);
17240  *pContext = VMA_NULL;
17241  }
17242 
17243  return res;
17244 }
17245 
17246 VkResult VmaAllocator_T::DefragmentationEnd(
17247  VmaDefragmentationContext context)
17248 {
17249  vma_delete(this, context);
17250  return VK_SUCCESS;
17251 }
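// Illustrative sketch: CPU-side defragmentation driven by the two functions
// above. `allocs`/`allocCount` are assumed; buffers or images bound to moved
// allocations must be destroyed and recreated afterwards.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx;
vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
// Without a commandBuffer, the CPU-side work completes inside Begin;
// End is still safe to call and releases the context if one remains.
vmaDefragmentationEnd(allocator, defragCtx);
*/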
17252 
17253 VkResult VmaAllocator_T::DefragmentationPassBegin(
17254  VmaDefragmentationPassInfo* pInfo,
17255  VmaDefragmentationContext context)
17256 {
17257  return context->DefragmentPassBegin(pInfo);
17258 }
17259 VkResult VmaAllocator_T::DefragmentationPassEnd(
17260  VmaDefragmentationContext context)
17261 {
17262  return context->DefragmentPassEnd();
17263 
17264 }
17265 
17266 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
17267 {
17268  if(hAllocation->CanBecomeLost())
17269  {
17270  /*
17271  Warning: This is a carefully designed algorithm.
17272  Do not modify unless you really know what you're doing :)
17273  */
17274  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17275  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17276  for(;;)
17277  {
17278  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
17279  {
17280  pAllocationInfo->memoryType = UINT32_MAX;
17281  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
17282  pAllocationInfo->offset = 0;
17283  pAllocationInfo->size = hAllocation->GetSize();
17284  pAllocationInfo->pMappedData = VMA_NULL;
17285  pAllocationInfo->pUserData = hAllocation->GetUserData();
17286  return;
17287  }
17288  else if(localLastUseFrameIndex == localCurrFrameIndex)
17289  {
17290  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
17291  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
17292  pAllocationInfo->offset = hAllocation->GetOffset();
17293  pAllocationInfo->size = hAllocation->GetSize();
17294  pAllocationInfo->pMappedData = VMA_NULL;
17295  pAllocationInfo->pUserData = hAllocation->GetUserData();
17296  return;
17297  }
17298  else // Last use time earlier than current time.
17299  {
17300  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17301  {
17302  localLastUseFrameIndex = localCurrFrameIndex;
17303  }
17304  }
17305  }
17306  }
17307  else
17308  {
17309 #if VMA_STATS_STRING_ENABLED
17310  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17311  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17312  for(;;)
17313  {
17314  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17315  if(localLastUseFrameIndex == localCurrFrameIndex)
17316  {
17317  break;
17318  }
17319  else // Last use time earlier than current time.
17320  {
17321  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17322  {
17323  localLastUseFrameIndex = localCurrFrameIndex;
17324  }
17325  }
17326  }
17327 #endif
17328 
17329  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
17330  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
17331  pAllocationInfo->offset = hAllocation->GetOffset();
17332  pAllocationInfo->size = hAllocation->GetSize();
17333  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
17334  pAllocationInfo->pUserData = hAllocation->GetUserData();
17335  }
17336 }
17337 
17338 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
17339 {
17340  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
17341  if(hAllocation->CanBecomeLost())
17342  {
17343  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17344  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17345  for(;;)
17346  {
17347  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
17348  {
17349  return false;
17350  }
17351  else if(localLastUseFrameIndex == localCurrFrameIndex)
17352  {
17353  return true;
17354  }
17355  else // Last use time earlier than current time.
17356  {
17357  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17358  {
17359  localLastUseFrameIndex = localCurrFrameIndex;
17360  }
17361  }
17362  }
17363  }
17364  else
17365  {
17366 #if VMA_STATS_STRING_ENABLED
17367  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17368  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17369  for(;;)
17370  {
17371  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17372  if(localLastUseFrameIndex == localCurrFrameIndex)
17373  {
17374  break;
17375  }
17376  else // Last use time earlier than current time.
17377  {
17378  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17379  {
17380  localLastUseFrameIndex = localCurrFrameIndex;
17381  }
17382  }
17383  }
17384 #endif
17385 
17386  return true;
17387  }
17388 }
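// Illustrative sketch: the per-frame protocol that the compare-exchange loop
// above implements, for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
/*
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation became lost - recreate the resource before using it.
}
*/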
17389 
17390 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
17391 {
17392  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
17393 
17394  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
17395 
17396  // Protection against an uninitialized new structure member. If garbage data were left there, this pointer dereference could crash.
17397  if(pCreateInfo->pMemoryAllocateNext)
17398  {
17399  VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
17400  }
17401 
17402  if(newCreateInfo.maxBlockCount == 0)
17403  {
17404  newCreateInfo.maxBlockCount = SIZE_MAX;
17405  }
17406  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
17407  {
17408  return VK_ERROR_INITIALIZATION_FAILED;
17409  }
17410  // Memory type index out of range or forbidden.
17411  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
17412  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
17413  {
17414  return VK_ERROR_FEATURE_NOT_PRESENT;
17415  }
17416  if(newCreateInfo.minAllocationAlignment > 0)
17417  {
17418  VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
17419  }
17420 
17421  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
17422 
17423  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
17424 
17425  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
17426  if(res != VK_SUCCESS)
17427  {
17428  vma_delete(this, *pPool);
17429  *pPool = VMA_NULL;
17430  return res;
17431  }
17432 
17433  // Add to m_Pools.
17434  {
17435  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17436  (*pPool)->SetId(m_NextPoolId++);
17437  m_Pools.PushBack(*pPool);
17438  }
17439 
17440  return VK_SUCCESS;
17441 }
17442 
17443 void VmaAllocator_T::DestroyPool(VmaPool pool)
17444 {
17445  // Remove from m_Pools.
17446  {
17447  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17448  m_Pools.Remove(pool);
17449  }
17450 
17451  vma_delete(this, pool);
17452 }
17453 
17454 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
17455 {
17456  pool->m_BlockVector.GetPoolStats(pPoolStats);
17457 }
17458 
17459 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
17460 {
17461  m_CurrentFrameIndex.store(frameIndex);
17462 
17463 #if VMA_MEMORY_BUDGET
17464  if(m_UseExtMemoryBudget)
17465  {
17466  UpdateVulkanBudget();
17467  }
17468 #endif // #if VMA_MEMORY_BUDGET
17469 }
17470 
17471 void VmaAllocator_T::MakePoolAllocationsLost(
17472  VmaPool hPool,
17473  size_t* pLostAllocationCount)
17474 {
17475  hPool->m_BlockVector.MakePoolAllocationsLost(
17476  m_CurrentFrameIndex.load(),
17477  pLostAllocationCount);
17478 }
17479 
17480 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
17481 {
17482  return hPool->m_BlockVector.CheckCorruption();
17483 }
17484 
17485 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
17486 {
17487  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
17488 
17489  // Process default pools.
17490  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17491  {
17492  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
17493  {
17494  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17495  VMA_ASSERT(pBlockVector);
17496  VkResult localRes = pBlockVector->CheckCorruption();
17497  switch(localRes)
17498  {
17499  case VK_ERROR_FEATURE_NOT_PRESENT:
17500  break;
17501  case VK_SUCCESS:
17502  finalRes = VK_SUCCESS;
17503  break;
17504  default:
17505  return localRes;
17506  }
17507  }
17508  }
17509 
17510  // Process custom pools.
17511  {
17512  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17513  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
17514  {
17515  if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
17516  {
17517  VkResult localRes = pool->m_BlockVector.CheckCorruption();
17518  switch(localRes)
17519  {
17520  case VK_ERROR_FEATURE_NOT_PRESENT:
17521  break;
17522  case VK_SUCCESS:
17523  finalRes = VK_SUCCESS;
17524  break;
17525  default:
17526  return localRes;
17527  }
17528  }
17529  }
17530  }
17531 
17532  return finalRes;
17533 }
17534 
17535 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
17536 {
17537  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
17538  (*pAllocation)->InitLost();
17539 }
17540 
17541 // An object that increments the given atomic but decrements it back in the destructor unless Commit() is called.
17542 template<typename T>
17543 struct AtomicTransactionalIncrement
17544 {
17545 public:
17546  typedef std::atomic<T> AtomicT;
17547  ~AtomicTransactionalIncrement()
17548  {
17549  if(m_Atomic)
17550  --(*m_Atomic);
17551  }
17552  T Increment(AtomicT* atomic)
17553  {
17554  m_Atomic = atomic;
17555  return m_Atomic->fetch_add(1);
17556  }
17557  void Commit()
17558  {
17559  m_Atomic = nullptr;
17560  }
17561 
17562 private:
17563  AtomicT* m_Atomic = nullptr;
17564 };
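// Usage sketch for the helper above: the increment is rolled back automatically
// on any early exit and kept only after Commit(). `TrySomething` is hypothetical.
/*
std::atomic<uint32_t> counter{0};
{
    AtomicTransactionalIncrement<uint32_t> inc;
    inc.Increment(&counter); // counter == 1
    if(!TrySomething())
        return;              // destructor rolls back: counter == 0
    inc.Commit();            // keep the increment: counter stays 1
}
*/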
17565 
17566 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
17567 {
17568  AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
17569  const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
17570 #if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
17571  if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
17572  {
17573  return VK_ERROR_TOO_MANY_OBJECTS;
17574  }
17575 #endif
17576 
17577  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
17578 
17579  // HeapSizeLimit is in effect for this heap.
17580  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
17581  {
17582  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
17583  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
17584  for(;;)
17585  {
17586  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
17587  if(blockBytesAfterAllocation > heapSize)
17588  {
17589  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17590  }
17591  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
17592  {
17593  break;
17594  }
17595  }
17596  }
17597  else
17598  {
17599  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
17600  }
17601 
17602  // VULKAN CALL vkAllocateMemory.
17603  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
17604 
17605  if(res == VK_SUCCESS)
17606  {
17607 #if VMA_MEMORY_BUDGET
17608  ++m_Budget.m_OperationsSinceBudgetFetch;
17609 #endif
17610 
17611  // Informative callback.
17612  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
17613  {
17614  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
17615  }
17616 
17617  deviceMemoryCountIncrement.Commit();
17618  }
17619  else
17620  {
17621  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
17622  }
17623 
17624  return res;
17625 }
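// The heap-size-limit path above is the standard lock-free "add with upper
// bound" idiom. A minimal standalone form of the same pattern (sketch):
/*
std::atomic<uint64_t> total{0};

bool TryAdd(uint64_t amount, uint64_t limit)
{
    uint64_t cur = total.load();
    for(;;)
    {
        const uint64_t next = cur + amount;
        if(next > limit)
            return false; // would exceed the limit
        // On failure compare_exchange_strong reloads `cur`, so just retry.
        if(total.compare_exchange_strong(cur, next))
            return true;
    }
}
*/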
17626 
17627 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
17628 {
17629  // Informative callback.
17630  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
17631  {
17632  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
17633  }
17634 
17635  // VULKAN CALL vkFreeMemory.
17636  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
17637 
17638  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
17639 
17640  --m_DeviceMemoryCount;
17641 }
17642 
17643 VkResult VmaAllocator_T::BindVulkanBuffer(
17644  VkDeviceMemory memory,
17645  VkDeviceSize memoryOffset,
17646  VkBuffer buffer,
17647  const void* pNext)
17648 {
17649  if(pNext != VMA_NULL)
17650  {
17651 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17652  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17653  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
17654  {
17655  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
17656  bindBufferMemoryInfo.pNext = pNext;
17657  bindBufferMemoryInfo.buffer = buffer;
17658  bindBufferMemoryInfo.memory = memory;
17659  bindBufferMemoryInfo.memoryOffset = memoryOffset;
17660  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
17661  }
17662  else
17663 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17664  {
17665  return VK_ERROR_EXTENSION_NOT_PRESENT;
17666  }
17667  }
17668  else
17669  {
17670  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
17671  }
17672 }
17673 
17674 VkResult VmaAllocator_T::BindVulkanImage(
17675  VkDeviceMemory memory,
17676  VkDeviceSize memoryOffset,
17677  VkImage image,
17678  const void* pNext)
17679 {
17680  if(pNext != VMA_NULL)
17681  {
17682 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17683  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17684  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
17685  {
17686  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
17687  bindImageMemoryInfo.pNext = pNext;
17688  bindImageMemoryInfo.image = image;
17689  bindImageMemoryInfo.memory = memory;
17690  bindImageMemoryInfo.memoryOffset = memoryOffset;
17691  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
17692  }
17693  else
17694 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17695  {
17696  return VK_ERROR_EXTENSION_NOT_PRESENT;
17697  }
17698  }
17699  else
17700  {
17701  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17702  }
17703 }
17704 
17705 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17706 {
17707  if(hAllocation->CanBecomeLost())
17708  {
17709  return VK_ERROR_MEMORY_MAP_FAILED;
17710  }
17711 
17712  switch(hAllocation->GetType())
17713  {
17714  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17715  {
17716  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17717  char *pBytes = VMA_NULL;
17718  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17719  if(res == VK_SUCCESS)
17720  {
17721  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17722  hAllocation->BlockAllocMap();
17723  }
17724  return res;
17725  }
17726  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17727  return hAllocation->DedicatedAllocMap(this, ppData);
17728  default:
17729  VMA_ASSERT(0);
17730  return VK_ERROR_MEMORY_MAP_FAILED;
17731  }
17732 }
17733 
17734 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17735 {
17736  switch(hAllocation->GetType())
17737  {
17738  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17739  {
17740  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17741  hAllocation->BlockAllocUnmap();
17742  pBlock->Unmap(this, 1);
17743  }
17744  break;
17745  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17746  hAllocation->DedicatedAllocUnmap(this);
17747  break;
17748  default:
17749  VMA_ASSERT(0);
17750  }
17751 }
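// Illustrative sketch: because Map()/Unmap() above are reference-counted per
// block, this is safe even if other allocations in the same VkDeviceMemory are
// mapped concurrently. `srcData`/`srcSize` are assumptions.
/*
void* mapped = VMA_NULL;
if(vmaMapMemory(allocator, alloc, &mapped) == VK_SUCCESS)
{
    memcpy(mapped, srcData, (size_t)srcSize);
    vmaUnmapMemory(allocator, alloc);
}
*/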
17752 
17753 VkResult VmaAllocator_T::BindBufferMemory(
17754  VmaAllocation hAllocation,
17755  VkDeviceSize allocationLocalOffset,
17756  VkBuffer hBuffer,
17757  const void* pNext)
17758 {
17759  VkResult res = VK_SUCCESS;
17760  switch(hAllocation->GetType())
17761  {
17762  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17763  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17764  break;
17765  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17766  {
17767  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17768  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17769  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17770  break;
17771  }
17772  default:
17773  VMA_ASSERT(0);
17774  }
17775  return res;
17776 }
17777 
17778 VkResult VmaAllocator_T::BindImageMemory(
17779  VmaAllocation hAllocation,
17780  VkDeviceSize allocationLocalOffset,
17781  VkImage hImage,
17782  const void* pNext)
17783 {
17784  VkResult res = VK_SUCCESS;
17785  switch(hAllocation->GetType())
17786  {
17787  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17788  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17789  break;
17790  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17791  {
17792  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17793  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17794  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17795  break;
17796  }
17797  default:
17798  VMA_ASSERT(0);
17799  }
17800  return res;
17801 }
17802 
17803 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17804  VmaAllocation hAllocation,
17805  VkDeviceSize offset, VkDeviceSize size,
17806  VMA_CACHE_OPERATION op)
17807 {
17808  VkResult res = VK_SUCCESS;
17809 
17810  VkMappedMemoryRange memRange = {};
17811  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17812  {
17813  switch(op)
17814  {
17815  case VMA_CACHE_FLUSH:
17816  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17817  break;
17818  case VMA_CACHE_INVALIDATE:
17819  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17820  break;
17821  default:
17822  VMA_ASSERT(0);
17823  }
17824  }
17825  // else: Just ignore this call.
17826  return res;
17827 }
17828 
17829 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17830  uint32_t allocationCount,
17831  const VmaAllocation* allocations,
17832  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17833  VMA_CACHE_OPERATION op)
17834 {
17835  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17836  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17837  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17838 
17839  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17840  {
17841  const VmaAllocation alloc = allocations[allocIndex];
17842  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17843  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17844  VkMappedMemoryRange newRange;
17845  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17846  {
17847  ranges.push_back(newRange);
17848  }
17849  }
17850 
17851  VkResult res = VK_SUCCESS;
17852  if(!ranges.empty())
17853  {
17854  switch(op)
17855  {
17856  case VMA_CACHE_FLUSH:
17857  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17858  break;
17859  case VMA_CACHE_INVALIDATE:
17860  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17861  break;
17862  default:
17863  VMA_ASSERT(0);
17864  }
17865  }
17866  // else: Just ignore this call.
17867  return res;
17868 }
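// Illustrative sketch: after writing through a mapped pointer, flush the range.
// Thanks to GetFlushOrInvalidateRange() below, the call reduces to a no-op for
// HOST_COHERENT memory types. `mapped`, `srcData`, `srcSize` are assumptions.
/*
memcpy(mapped, srcData, (size_t)srcSize);
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
*/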
17869 
17870 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17871 {
17872  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17873 
17874  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17875  {
17876  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17877  DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
17878  dedicatedAllocations.Remove(allocation);
17879  }
17880 
17881  VkDeviceMemory hMemory = allocation->GetMemory();
17882 
17883  /*
17884  There is no need to call this, because the Vulkan spec allows skipping
17885  vkUnmapMemory before vkFreeMemory.
17886 
17887  if(allocation->GetMappedData() != VMA_NULL)
17888  {
17889  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17890  }
17891  */
17892 
17893  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17894 
17895  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17896 }
17897 
17898 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17899 {
17900  VkBufferCreateInfo dummyBufCreateInfo;
17901  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17902 
17903  uint32_t memoryTypeBits = 0;
17904 
17905  // Create buffer.
17906  VkBuffer buf = VK_NULL_HANDLE;
17907  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17908  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17909  if(res == VK_SUCCESS)
17910  {
17911  // Query for supported memory types.
17912  VkMemoryRequirements memReq;
17913  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17914  memoryTypeBits = memReq.memoryTypeBits;
17915 
17916  // Destroy buffer.
17917  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17918  }
17919 
17920  return memoryTypeBits;
17921 }
17922 
17923 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17924 {
17925  // Make sure memory information is already fetched.
17926  VMA_ASSERT(GetMemoryTypeCount() > 0);
17927 
17928  uint32_t memoryTypeBits = UINT32_MAX;
17929 
17930  if(!m_UseAmdDeviceCoherentMemory)
17931  {
17932  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17933  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17934  {
17935  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17936  {
17937  memoryTypeBits &= ~(1u << memTypeIndex);
17938  }
17939  }
17940  }
17941 
17942  return memoryTypeBits;
17943 }
17944 
17945 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17946  VmaAllocation allocation,
17947  VkDeviceSize offset, VkDeviceSize size,
17948  VkMappedMemoryRange& outRange) const
17949 {
17950  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17951  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17952  {
17953  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17954  const VkDeviceSize allocationSize = allocation->GetSize();
17955  VMA_ASSERT(offset <= allocationSize);
17956 
17957  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17958  outRange.pNext = VMA_NULL;
17959  outRange.memory = allocation->GetMemory();
17960 
17961  switch(allocation->GetType())
17962  {
17963  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17964  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17965  if(size == VK_WHOLE_SIZE)
17966  {
17967  outRange.size = allocationSize - outRange.offset;
17968  }
17969  else
17970  {
17971  VMA_ASSERT(offset + size <= allocationSize);
17972  outRange.size = VMA_MIN(
17973  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17974  allocationSize - outRange.offset);
17975  }
17976  break;
17977  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17978  {
17979  // 1. Still within this allocation.
17980  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17981  if(size == VK_WHOLE_SIZE)
17982  {
17983  size = allocationSize - offset;
17984  }
17985  else
17986  {
17987  VMA_ASSERT(offset + size <= allocationSize);
17988  }
17989  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17990 
17991  // 2. Adjust to whole block.
17992  const VkDeviceSize allocationOffset = allocation->GetOffset();
17993  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17994  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17995  outRange.offset += allocationOffset;
17996  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17997 
17998  break;
17999  }
18000  default:
18001  VMA_ASSERT(0);
18002  }
18003  return true;
18004  }
18005  return false;
18006 }
18007 
18008 #if VMA_MEMORY_BUDGET
18009 
18010 void VmaAllocator_T::UpdateVulkanBudget()
18011 {
18012  VMA_ASSERT(m_UseExtMemoryBudget);
18013 
18014  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
18015 
18016  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
18017  VmaPnextChainPushFront(&memProps, &budgetProps);
18018 
18019  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
18020 
18021  {
18022  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
18023 
18024  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
18025  {
18026  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
18027  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
18028  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
18029 
18030  // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
18031  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
18032  {
18033  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
18034  }
18035  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
18036  {
18037  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
18038  }
18039  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
18040  {
18041  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
18042  }
18043  }
18044  m_Budget.m_OperationsSinceBudgetFetch = 0;
18045  }
18046 }
18047 
18048 #endif // #if VMA_MEMORY_BUDGET
18049 
18050 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
18051 {
18052  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
18053  !hAllocation->CanBecomeLost() &&
18054  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
18055  {
18056  void* pData = VMA_NULL;
18057  VkResult res = Map(hAllocation, &pData);
18058  if(res == VK_SUCCESS)
18059  {
18060  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
18061  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
18062  Unmap(hAllocation);
18063  }
18064  else
18065  {
18066  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
18067  }
18068  }
18069 }
18070 
18071 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
18072 {
18073  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
18074  if(memoryTypeBits == UINT32_MAX)
18075  {
18076  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
18077  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
18078  }
18079  return memoryTypeBits;
18080 }
18081 
18082 #if VMA_STATS_STRING_ENABLED
18083 
18084 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
18085 {
18086  bool dedicatedAllocationsStarted = false;
18087  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
18088  {
18089  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
18090  DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
18091  if(!dedicatedAllocList.IsEmpty())
18092  {
18093  if(dedicatedAllocationsStarted == false)
18094  {
18095  dedicatedAllocationsStarted = true;
18096  json.WriteString("DedicatedAllocations");
18097  json.BeginObject();
18098  }
18099 
18100  json.BeginString("Type ");
18101  json.ContinueString(memTypeIndex);
18102  json.EndString();
18103 
18104  json.BeginArray();
18105 
18106  for(VmaAllocation alloc = dedicatedAllocList.Front();
18107  alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
18108  {
18109  json.BeginObject(true);
18110  alloc->PrintParameters(json);
18111  json.EndObject();
18112  }
18113 
18114  json.EndArray();
18115  }
18116  }
18117  if(dedicatedAllocationsStarted)
18118  {
18119  json.EndObject();
18120  }
18121 
18122  {
18123  bool allocationsStarted = false;
18124  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
18125  {
18126  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
18127  {
18128  if(allocationsStarted == false)
18129  {
18130  allocationsStarted = true;
18131  json.WriteString("DefaultPools");
18132  json.BeginObject();
18133  }
18134 
18135  json.BeginString("Type ");
18136  json.ContinueString(memTypeIndex);
18137  json.EndString();
18138 
18139  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
18140  }
18141  }
18142  if(allocationsStarted)
18143  {
18144  json.EndObject();
18145  }
18146  }
18147 
18148  // Custom pools
18149  {
18150  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
18151  if(!m_Pools.IsEmpty())
18152  {
18153  json.WriteString("Pools");
18154  json.BeginObject();
18155  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
18156  {
18157  json.BeginString();
18158  json.ContinueString(pool->GetId());
18159  json.EndString();
18160 
18161  pool->m_BlockVector.PrintDetailedMap(json);
18162  }
18163  json.EndObject();
18164  }
18165  }
18166 }
18167 
18168 #endif // #if VMA_STATS_STRING_ENABLED
18169 
18170 //////////////////////////////////////////////////////////////////////////////////
18171 // Public interface
18172 
18173 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
18174  const VmaAllocatorCreateInfo* pCreateInfo,
18175  VmaAllocator* pAllocator)
18176 {
18177  VMA_ASSERT(pCreateInfo && pAllocator);
18178  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
18179  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
18180  VMA_DEBUG_LOG("vmaCreateAllocator");
18181  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
18182  return (*pAllocator)->Init(pCreateInfo);
18183 }
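// Illustrative sketch: minimal allocator setup consistent with the version
// assertion above (vulkanApiVersion must be 1.0-1.2 for this header revision).
// `physicalDevice`, `device`, `instance` are assumed valid Vulkan handles.
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1;
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;
allocatorInfo.instance = instance;

VmaAllocator allocator;
vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator);
*/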
18184 
18185 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
18186  VmaAllocator allocator)
18187 {
18188  if(allocator != VK_NULL_HANDLE)
18189  {
18190  VMA_DEBUG_LOG("vmaDestroyAllocator");
18191  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
18192  vma_delete(&allocationCallbacks, allocator);
18193  }
18194 }
18195 
18196 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
18197 {
18198  VMA_ASSERT(allocator && pAllocatorInfo);
18199  pAllocatorInfo->instance = allocator->m_hInstance;
18200  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
18201  pAllocatorInfo->device = allocator->m_hDevice;
18202 }
18203 
18204 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
18205  VmaAllocator allocator,
18206  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
18207 {
18208  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
18209  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
18210 }
18211 
18212 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
18213  VmaAllocator allocator,
18214  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
18215 {
18216  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
18217  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
18218 }
18219 
18220 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
18221  VmaAllocator allocator,
18222  uint32_t memoryTypeIndex,
18223  VkMemoryPropertyFlags* pFlags)
18224 {
18225  VMA_ASSERT(allocator && pFlags);
18226  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
18227  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
18228 }
18229 
18230 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
18231  VmaAllocator allocator,
18232  uint32_t frameIndex)
18233 {
18234  VMA_ASSERT(allocator);
18235  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
18236 
18237  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18238 
18239  allocator->SetCurrentFrameIndex(frameIndex);
18240 }
18241 
18242 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
18243  VmaAllocator allocator,
18244  VmaStats* pStats)
18245 {
18246  VMA_ASSERT(allocator && pStats);
18247  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18248  allocator->CalculateStats(pStats);
18249 }
18250 
18251 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
18252  VmaAllocator allocator,
18253  VmaBudget* pBudget)
18254 {
18255  VMA_ASSERT(allocator && pBudget);
18256  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18257  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
18258 }
18259 
18260 #if VMA_STATS_STRING_ENABLED
18261 
18262 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
18263  VmaAllocator allocator,
18264  char** ppStatsString,
18265  VkBool32 detailedMap)
18266 {
18267  VMA_ASSERT(allocator && ppStatsString);
18268  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18269 
18270  VmaStringBuilder sb(allocator);
18271  {
18272  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
18273  json.BeginObject();
18274 
18275  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
18276  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
18277 
18278  VmaStats stats;
18279  allocator->CalculateStats(&stats);
18280 
18281  json.WriteString("Total");
18282  VmaPrintStatInfo(json, stats.total);
18283 
18284  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
18285  {
18286  json.BeginString("Heap ");
18287  json.ContinueString(heapIndex);
18288  json.EndString();
18289  json.BeginObject();
18290 
18291  json.WriteString("Size");
18292  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
18293 
18294  json.WriteString("Flags");
18295  json.BeginArray(true);
18296  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
18297  {
18298  json.WriteString("DEVICE_LOCAL");
18299  }
18300  json.EndArray();
18301 
18302  json.WriteString("Budget");
18303  json.BeginObject();
18304  {
18305  json.WriteString("BlockBytes");
18306  json.WriteNumber(budget[heapIndex].blockBytes);
18307  json.WriteString("AllocationBytes");
18308  json.WriteNumber(budget[heapIndex].allocationBytes);
18309  json.WriteString("Usage");
18310  json.WriteNumber(budget[heapIndex].usage);
18311  json.WriteString("Budget");
18312  json.WriteNumber(budget[heapIndex].budget);
18313  }
18314  json.EndObject();
18315 
18316  if(stats.memoryHeap[heapIndex].blockCount > 0)
18317  {
18318  json.WriteString("Stats");
18319  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
18320  }
18321 
18322  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
18323  {
18324  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
18325  {
18326  json.BeginString("Type ");
18327  json.ContinueString(typeIndex);
18328  json.EndString();
18329 
18330  json.BeginObject();
18331 
18332  json.WriteString("Flags");
18333  json.BeginArray(true);
18334  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
18335  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
18336  {
18337  json.WriteString("DEVICE_LOCAL");
18338  }
18339  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
18340  {
18341  json.WriteString("HOST_VISIBLE");
18342  }
18343  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
18344  {
18345  json.WriteString("HOST_COHERENT");
18346  }
18347  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
18348  {
18349  json.WriteString("HOST_CACHED");
18350  }
18351  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
18352  {
18353  json.WriteString("LAZILY_ALLOCATED");
18354  }
18355 #if VMA_VULKAN_VERSION >= 1001000
18356  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
18357  {
18358  json.WriteString("PROTECTED");
18359  }
18360 #endif // #if VMA_VULKAN_VERSION >= 1001000
18361 #if VK_AMD_device_coherent_memory
18362  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
18363  {
18364  json.WriteString("DEVICE_COHERENT");
18365  }
18366  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
18367  {
18368  json.WriteString("DEVICE_UNCACHED");
18369  }
18370 #endif // #if VK_AMD_device_coherent_memory
18371  json.EndArray();
18372 
18373  if(stats.memoryType[typeIndex].blockCount > 0)
18374  {
18375  json.WriteString("Stats");
18376  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
18377  }
18378 
18379  json.EndObject();
18380  }
18381  }
18382 
18383  json.EndObject();
18384  }
18385  if(detailedMap == VK_TRUE)
18386  {
18387  allocator->PrintDetailedMap(json);
18388  }
18389 
18390  json.EndObject();
18391  }
18392 
18393  const size_t len = sb.GetLength();
18394  char* const pChars = vma_new_array(allocator, char, len + 1);
18395  if(len > 0)
18396  {
18397  memcpy(pChars, sb.GetData(), len);
18398  }
18399  pChars[len] = '\0';
18400  *ppStatsString = pChars;
18401 }
18402 
18403 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
18404  VmaAllocator allocator,
18405  char* pStatsString)
18406 {
18407  if(pStatsString != VMA_NULL)
18408  {
18409  VMA_ASSERT(allocator);
18410  size_t len = strlen(pStatsString);
18411  vma_delete_array(allocator, pStatsString, len + 1);
18412  }
18413 }
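// Illustrative sketch: dumping the JSON statistics produced above. Assumes
// <cstdio> is available.
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE);
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/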
18414 
18415 #endif // #if VMA_STATS_STRING_ENABLED
18416 
18417 /*
18418 This function is not protected by any mutex because it just reads immutable data.
18419 */
18420 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
18421  VmaAllocator allocator,
18422  uint32_t memoryTypeBits,
18423  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18424  uint32_t* pMemoryTypeIndex)
18425 {
18426  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18427  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18428  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18429 
18430  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
18431 
18432  if(pAllocationCreateInfo->memoryTypeBits != 0)
18433  {
18434  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
18435  }
18436 
18437  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
18438  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
18439  uint32_t notPreferredFlags = 0;
18440 
18441  // Convert usage to requiredFlags and preferredFlags.
18442  switch(pAllocationCreateInfo->usage)
18443  {
18444  case VMA_MEMORY_USAGE_UNKNOWN:
18445  break;
18446  case VMA_MEMORY_USAGE_GPU_ONLY:
18447  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18448  {
18449  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18450  }
18451  break;
18452  case VMA_MEMORY_USAGE_CPU_ONLY:
18453  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
18454  break;
18455  case VMA_MEMORY_USAGE_CPU_TO_GPU:
18456  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18457  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18458  {
18459  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18460  }
18461  break;
18462  case VMA_MEMORY_USAGE_GPU_TO_CPU:
18463  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18464  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
18465  break;
18466  case VMA_MEMORY_USAGE_CPU_COPY:
18467  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18468  break;
18469  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
18470  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
18471  break;
18472  default:
18473  VMA_ASSERT(0);
18474  break;
18475  }
18476 
18477  // Avoid DEVICE_COHERENT unless explicitly requested.
18478  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
18479  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
18480  {
18481  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
18482  }
18483 
18484  *pMemoryTypeIndex = UINT32_MAX;
18485  uint32_t minCost = UINT32_MAX;
18486  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
18487  memTypeIndex < allocator->GetMemoryTypeCount();
18488  ++memTypeIndex, memTypeBit <<= 1)
18489  {
18490  // This memory type is acceptable according to memoryTypeBits bitmask.
18491  if((memTypeBit & memoryTypeBits) != 0)
18492  {
18493  const VkMemoryPropertyFlags currFlags =
18494  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
18495  // This memory type contains requiredFlags.
18496  if((requiredFlags & ~currFlags) == 0)
18497  {
18498  // Calculate cost as number of bits from preferredFlags not present in this memory type.
18499  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
18500  VmaCountBitsSet(currFlags & notPreferredFlags);
18501  // Remember memory type with lowest cost.
18502  if(currCost < minCost)
18503  {
18504  *pMemoryTypeIndex = memTypeIndex;
18505  if(currCost == 0)
18506  {
18507  return VK_SUCCESS;
18508  }
18509  minCost = currCost;
18510  }
18511  }
18512  }
18513  }
18514  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
18515 }
18516 
18517 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
18518  VmaAllocator allocator,
18519  const VkBufferCreateInfo* pBufferCreateInfo,
18520  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18521  uint32_t* pMemoryTypeIndex)
18522 {
18523  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18524  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
18525  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18526  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18527 
18528  const VkDevice hDev = allocator->m_hDevice;
18529  VkBuffer hBuffer = VK_NULL_HANDLE;
18530  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
18531  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
18532  if(res == VK_SUCCESS)
18533  {
18534  VkMemoryRequirements memReq = {};
18535  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
18536  hDev, hBuffer, &memReq);
18537 
18538  res = vmaFindMemoryTypeIndex(
18539  allocator,
18540  memReq.memoryTypeBits,
18541  pAllocationCreateInfo,
18542  pMemoryTypeIndex);
18543 
18544  allocator->GetVulkanFunctions().vkDestroyBuffer(
18545  hDev, hBuffer, allocator->GetAllocationCallbacks());
18546  }
18547  return res;
18548 }
18549 
18550 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
18551  VmaAllocator allocator,
18552  const VkImageCreateInfo* pImageCreateInfo,
18553  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18554  uint32_t* pMemoryTypeIndex)
18555 {
18556  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18557  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
18558  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18559  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18560 
18561  const VkDevice hDev = allocator->m_hDevice;
18562  VkImage hImage = VK_NULL_HANDLE;
18563  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
18564  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
18565  if(res == VK_SUCCESS)
18566  {
18567  VkMemoryRequirements memReq = {};
18568  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
18569  hDev, hImage, &memReq);
18570 
18571  res = vmaFindMemoryTypeIndex(
18572  allocator,
18573  memReq.memoryTypeBits,
18574  pAllocationCreateInfo,
18575  pMemoryTypeIndex);
18576 
18577  allocator->GetVulkanFunctions().vkDestroyImage(
18578  hDev, hImage, allocator->GetAllocationCallbacks());
18579  }
18580  return res;
18581 }
18582 
18583 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
18584  VmaAllocator allocator,
18585  const VmaPoolCreateInfo* pCreateInfo,
18586  VmaPool* pPool)
18587 {
18588  VMA_ASSERT(allocator && pCreateInfo && pPool);
18589 
18590  VMA_DEBUG_LOG("vmaCreatePool");
18591 
18592  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18593 
18594  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
18595 
18596 #if VMA_RECORDING_ENABLED
18597  if(allocator->GetRecorder() != VMA_NULL)
18598  {
18599  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
18600  }
18601 #endif
18602 
18603  return res;
18604 }
18605 
18606 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
18607  VmaAllocator allocator,
18608  VmaPool pool)
18609 {
18610  VMA_ASSERT(allocator);
18611 
18612  if(pool == VK_NULL_HANDLE)
18613  {
18614  return;
18615  }
18616 
18617  VMA_DEBUG_LOG("vmaDestroyPool");
18618 
18619  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18620 
18621 #if VMA_RECORDING_ENABLED
18622  if(allocator->GetRecorder() != VMA_NULL)
18623  {
18624  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
18625  }
18626 #endif
18627 
18628  allocator->DestroyPool(pool);
18629 }
18630 
18631 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
18632  VmaAllocator allocator,
18633  VmaPool pool,
18634  VmaPoolStats* pPoolStats)
18635 {
18636  VMA_ASSERT(allocator && pool && pPoolStats);
18637 
18638  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18639 
18640  allocator->GetPoolStats(pool, pPoolStats);
18641 }
18642 
18643 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
18644  VmaAllocator allocator,
18645  VmaPool pool,
18646  size_t* pLostAllocationCount)
18647 {
18648  VMA_ASSERT(allocator && pool);
18649 
18650  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18651 
18652 #if VMA_RECORDING_ENABLED
18653  if(allocator->GetRecorder() != VMA_NULL)
18654  {
18655  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
18656  }
18657 #endif
18658 
18659  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18660 }
18661 
18662 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
18663 {
18664  VMA_ASSERT(allocator && pool);
18665 
18666  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18667 
18668  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
18669 
18670  return allocator->CheckPoolCorruption(pool);
18671 }
18672 
18673 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
18674  VmaAllocator allocator,
18675  VmaPool pool,
18676  const char** ppName)
18677 {
18678  VMA_ASSERT(allocator && pool && ppName);
18679 
18680  VMA_DEBUG_LOG("vmaGetPoolName");
18681 
18682  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18683 
18684  *ppName = pool->GetName();
18685 }
18686 
18687 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
18688  VmaAllocator allocator,
18689  VmaPool pool,
18690  const char* pName)
18691 {
18692  VMA_ASSERT(allocator && pool);
18693 
18694  VMA_DEBUG_LOG("vmaSetPoolName");
18695 
18696  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18697 
18698  pool->SetName(pName);
18699 
18700 #if VMA_RECORDING_ENABLED
18701  if(allocator->GetRecorder() != VMA_NULL)
18702  {
18703  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18704  }
18705 #endif
18706 }
18707 
18708 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18709  VmaAllocator allocator,
18710  const VkMemoryRequirements* pVkMemoryRequirements,
18711  const VmaAllocationCreateInfo* pCreateInfo,
18712  VmaAllocation* pAllocation,
18713  VmaAllocationInfo* pAllocationInfo)
18714 {
18715  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18716 
18717  VMA_DEBUG_LOG("vmaAllocateMemory");
18718 
18719  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18720 
18721  VkResult result = allocator->AllocateMemory(
18722  *pVkMemoryRequirements,
18723  false, // requiresDedicatedAllocation
18724  false, // prefersDedicatedAllocation
18725  VK_NULL_HANDLE, // dedicatedBuffer
18726  UINT32_MAX, // dedicatedBufferUsage
18727  VK_NULL_HANDLE, // dedicatedImage
18728  *pCreateInfo,
18729  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18730  1, // allocationCount
18731  pAllocation);
18732 
18733 #if VMA_RECORDING_ENABLED
18734  if(allocator->GetRecorder() != VMA_NULL)
18735  {
18736  allocator->GetRecorder()->RecordAllocateMemory(
18737  allocator->GetCurrentFrameIndex(),
18738  *pVkMemoryRequirements,
18739  *pCreateInfo,
18740  *pAllocation);
18741  }
18742 #endif
18743 
18744  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18745  {
18746  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18747  }
18748 
18749  return result;
18750 }
18751 
18752 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18753  VmaAllocator allocator,
18754  const VkMemoryRequirements* pVkMemoryRequirements,
18755  const VmaAllocationCreateInfo* pCreateInfo,
18756  size_t allocationCount,
18757  VmaAllocation* pAllocations,
18758  VmaAllocationInfo* pAllocationInfo)
18759 {
18760  if(allocationCount == 0)
18761  {
18762  return VK_SUCCESS;
18763  }
18764 
18765  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18766 
18767  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18768 
18769  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18770 
18771  VkResult result = allocator->AllocateMemory(
18772  *pVkMemoryRequirements,
18773  false, // requiresDedicatedAllocation
18774  false, // prefersDedicatedAllocation
18775  VK_NULL_HANDLE, // dedicatedBuffer
18776  UINT32_MAX, // dedicatedBufferUsage
18777  VK_NULL_HANDLE, // dedicatedImage
18778  *pCreateInfo,
18779  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18780  allocationCount,
18781  pAllocations);
18782 
18783 #if VMA_RECORDING_ENABLED
18784  if(allocator->GetRecorder() != VMA_NULL)
18785  {
18786  allocator->GetRecorder()->RecordAllocateMemoryPages(
18787  allocator->GetCurrentFrameIndex(),
18788  *pVkMemoryRequirements,
18789  *pCreateInfo,
18790  (uint64_t)allocationCount,
18791  pAllocations);
18792  }
18793 #endif
18794 
18795  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18796  {
18797  for(size_t i = 0; i < allocationCount; ++i)
18798  {
18799  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18800  }
18801  }
18802 
18803  return result;
18804 }
18805 
18806 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18807  VmaAllocator allocator,
18808  VkBuffer buffer,
18809  const VmaAllocationCreateInfo* pCreateInfo,
18810  VmaAllocation* pAllocation,
18811  VmaAllocationInfo* pAllocationInfo)
18812 {
18813  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18814 
18815  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18816 
18817  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18818 
18819  VkMemoryRequirements vkMemReq = {};
18820  bool requiresDedicatedAllocation = false;
18821  bool prefersDedicatedAllocation = false;
18822  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18823  requiresDedicatedAllocation,
18824  prefersDedicatedAllocation);
18825 
18826  VkResult result = allocator->AllocateMemory(
18827  vkMemReq,
18828  requiresDedicatedAllocation,
18829  prefersDedicatedAllocation,
18830  buffer, // dedicatedBuffer
18831  UINT32_MAX, // dedicatedBufferUsage
18832  VK_NULL_HANDLE, // dedicatedImage
18833  *pCreateInfo,
18834  VMA_SUBALLOCATION_TYPE_BUFFER,
18835  1, // allocationCount
18836  pAllocation);
18837 
18838 #if VMA_RECORDING_ENABLED
18839  if(allocator->GetRecorder() != VMA_NULL)
18840  {
18841  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18842  allocator->GetCurrentFrameIndex(),
18843  vkMemReq,
18844  requiresDedicatedAllocation,
18845  prefersDedicatedAllocation,
18846  *pCreateInfo,
18847  *pAllocation);
18848  }
18849 #endif
18850 
18851  if(pAllocationInfo && result == VK_SUCCESS)
18852  {
18853  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18854  }
18855 
18856  return result;
18857 }
18858 
18859 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18860  VmaAllocator allocator,
18861  VkImage image,
18862  const VmaAllocationCreateInfo* pCreateInfo,
18863  VmaAllocation* pAllocation,
18864  VmaAllocationInfo* pAllocationInfo)
18865 {
18866  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18867 
18868  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18869 
18870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18871 
18872  VkMemoryRequirements vkMemReq = {};
18873  bool requiresDedicatedAllocation = false;
18874  bool prefersDedicatedAllocation = false;
18875  allocator->GetImageMemoryRequirements(image, vkMemReq,
18876  requiresDedicatedAllocation, prefersDedicatedAllocation);
18877 
18878  VkResult result = allocator->AllocateMemory(
18879  vkMemReq,
18880  requiresDedicatedAllocation,
18881  prefersDedicatedAllocation,
18882  VK_NULL_HANDLE, // dedicatedBuffer
18883  UINT32_MAX, // dedicatedBufferUsage
18884  image, // dedicatedImage
18885  *pCreateInfo,
18886  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18887  1, // allocationCount
18888  pAllocation);
18889 
18890 #if VMA_RECORDING_ENABLED
18891  if(allocator->GetRecorder() != VMA_NULL)
18892  {
18893  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18894  allocator->GetCurrentFrameIndex(),
18895  vkMemReq,
18896  requiresDedicatedAllocation,
18897  prefersDedicatedAllocation,
18898  *pCreateInfo,
18899  *pAllocation);
18900  }
18901 #endif
18902 
18903  if(pAllocationInfo && result == VK_SUCCESS)
18904  {
18905  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18906  }
18907 
18908  return result;
18909 }
18910 
18911 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18912  VmaAllocator allocator,
18913  VmaAllocation allocation)
18914 {
18915  VMA_ASSERT(allocator);
18916 
18917  if(allocation == VK_NULL_HANDLE)
18918  {
18919  return;
18920  }
18921 
18922  VMA_DEBUG_LOG("vmaFreeMemory");
18923 
18924  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18925 
18926 #if VMA_RECORDING_ENABLED
18927  if(allocator->GetRecorder() != VMA_NULL)
18928  {
18929  allocator->GetRecorder()->RecordFreeMemory(
18930  allocator->GetCurrentFrameIndex(),
18931  allocation);
18932  }
18933 #endif
18934 
18935  allocator->FreeMemory(
18936  1, // allocationCount
18937  &allocation);
18938 }
18939 
18940 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18941  VmaAllocator allocator,
18942  size_t allocationCount,
18943  const VmaAllocation* pAllocations)
18944 {
18945  if(allocationCount == 0)
18946  {
18947  return;
18948  }
18949 
18950  VMA_ASSERT(allocator);
18951 
18952  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18953 
18954  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18955 
18956 #if VMA_RECORDING_ENABLED
18957  if(allocator->GetRecorder() != VMA_NULL)
18958  {
18959  allocator->GetRecorder()->RecordFreeMemoryPages(
18960  allocator->GetCurrentFrameIndex(),
18961  (uint64_t)allocationCount,
18962  pAllocations);
18963  }
18964 #endif
18965 
18966  allocator->FreeMemory(allocationCount, pAllocations);
18967 }
18968 
18969 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18970  VmaAllocator allocator,
18971  VmaAllocation allocation,
18972  VmaAllocationInfo* pAllocationInfo)
18973 {
18974  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18975 
18976  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18977 
18978 #if VMA_RECORDING_ENABLED
18979  if(allocator->GetRecorder() != VMA_NULL)
18980  {
18981  allocator->GetRecorder()->RecordGetAllocationInfo(
18982  allocator->GetCurrentFrameIndex(),
18983  allocation);
18984  }
18985 #endif
18986 
18987  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18988 }
18989 
18990 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18991  VmaAllocator allocator,
18992  VmaAllocation allocation)
18993 {
18994  VMA_ASSERT(allocator && allocation);
18995 
18996  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18997 
18998 #if VMA_RECORDING_ENABLED
18999  if(allocator->GetRecorder() != VMA_NULL)
19000  {
19001  allocator->GetRecorder()->RecordTouchAllocation(
19002  allocator->GetCurrentFrameIndex(),
19003  allocation);
19004  }
19005 #endif
19006 
19007  return allocator->TouchAllocation(allocation);
19008 }
19009 
19010 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
19011  VmaAllocator allocator,
19012  VmaAllocation allocation,
19013  void* pUserData)
19014 {
19015  VMA_ASSERT(allocator && allocation);
19016 
19017  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19018 
19019  allocation->SetUserData(allocator, pUserData);
19020 
19021 #if VMA_RECORDING_ENABLED
19022  if(allocator->GetRecorder() != VMA_NULL)
19023  {
19024  allocator->GetRecorder()->RecordSetAllocationUserData(
19025  allocator->GetCurrentFrameIndex(),
19026  allocation,
19027  pUserData);
19028  }
19029 #endif
19030 }
19031 
19032 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
19033  VmaAllocator allocator,
19034  VmaAllocation* pAllocation)
19035 {
19036  VMA_ASSERT(allocator && pAllocation);
19037 
19038  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19039 
19040  allocator->CreateLostAllocation(pAllocation);
19041 
19042 #if VMA_RECORDING_ENABLED
19043  if(allocator->GetRecorder() != VMA_NULL)
19044  {
19045  allocator->GetRecorder()->RecordCreateLostAllocation(
19046  allocator->GetCurrentFrameIndex(),
19047  *pAllocation);
19048  }
19049 #endif
19050 }
19051 
19052 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
19053  VmaAllocator allocator,
19054  VmaAllocation allocation,
19055  void** ppData)
19056 {
19057  VMA_ASSERT(allocator && allocation && ppData);
19058 
19059  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19060 
19061  VkResult res = allocator->Map(allocation, ppData);
19062 
19063 #if VMA_RECORDING_ENABLED
19064  if(allocator->GetRecorder() != VMA_NULL)
19065  {
19066  allocator->GetRecorder()->RecordMapMemory(
19067  allocator->GetCurrentFrameIndex(),
19068  allocation);
19069  }
19070 #endif
19071 
19072  return res;
19073 }
19074 
19075 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
19076  VmaAllocator allocator,
19077  VmaAllocation allocation)
19078 {
19079  VMA_ASSERT(allocator && allocation);
19080 
19081  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19082 
19083 #if VMA_RECORDING_ENABLED
19084  if(allocator->GetRecorder() != VMA_NULL)
19085  {
19086  allocator->GetRecorder()->RecordUnmapMemory(
19087  allocator->GetCurrentFrameIndex(),
19088  allocation);
19089  }
19090 #endif
19091 
19092  allocator->Unmap(allocation);
19093 }
19094 
19095 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
19096 {
19097  VMA_ASSERT(allocator && allocation);
19098 
19099  VMA_DEBUG_LOG("vmaFlushAllocation");
19100 
19101  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19102 
19103  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
19104 
19105 #if VMA_RECORDING_ENABLED
19106  if(allocator->GetRecorder() != VMA_NULL)
19107  {
19108  allocator->GetRecorder()->RecordFlushAllocation(
19109  allocator->GetCurrentFrameIndex(),
19110  allocation, offset, size);
19111  }
19112 #endif
19113 
19114  return res;
19115 }
19116 
19117 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
19118 {
19119  VMA_ASSERT(allocator && allocation);
19120 
19121  VMA_DEBUG_LOG("vmaInvalidateAllocation");
19122 
19123  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19124 
19125  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
19126 
19127 #if VMA_RECORDING_ENABLED
19128  if(allocator->GetRecorder() != VMA_NULL)
19129  {
19130  allocator->GetRecorder()->RecordInvalidateAllocation(
19131  allocator->GetCurrentFrameIndex(),
19132  allocation, offset, size);
19133  }
19134 #endif
19135 
19136  return res;
19137 }
19138 
19139 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
19140  VmaAllocator allocator,
19141  uint32_t allocationCount,
19142  const VmaAllocation* allocations,
19143  const VkDeviceSize* offsets,
19144  const VkDeviceSize* sizes)
19145 {
19146  VMA_ASSERT(allocator);
19147 
19148  if(allocationCount == 0)
19149  {
19150  return VK_SUCCESS;
19151  }
19152 
19153  VMA_ASSERT(allocations);
19154 
19155  VMA_DEBUG_LOG("vmaFlushAllocations");
19156 
19157  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19158 
19159  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
19160 
19161 #if VMA_RECORDING_ENABLED
19162  if(allocator->GetRecorder() != VMA_NULL)
19163  {
19164  //TODO
19165  }
19166 #endif
19167 
19168  return res;
19169 }
19170 
19171 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
19172  VmaAllocator allocator,
19173  uint32_t allocationCount,
19174  const VmaAllocation* allocations,
19175  const VkDeviceSize* offsets,
19176  const VkDeviceSize* sizes)
19177 {
19178  VMA_ASSERT(allocator);
19179 
19180  if(allocationCount == 0)
19181  {
19182  return VK_SUCCESS;
19183  }
19184 
19185  VMA_ASSERT(allocations);
19186 
19187  VMA_DEBUG_LOG("vmaInvalidateAllocations");
19188 
19189  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19190 
19191  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
19192 
19193 #if VMA_RECORDING_ENABLED
19194  if(allocator->GetRecorder() != VMA_NULL)
19195  {
19196  //TODO
19197  }
19198 #endif
19199 
19200  return res;
19201 }
19202 
19203 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
19204 {
19205  VMA_ASSERT(allocator);
19206 
19207  VMA_DEBUG_LOG("vmaCheckCorruption");
19208 
19209  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19210 
19211  return allocator->CheckCorruption(memoryTypeBits);
19212 }
19213 
19214 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
19215  VmaAllocator allocator,
19216  const VmaAllocation* pAllocations,
19217  size_t allocationCount,
19218  VkBool32* pAllocationsChanged,
19219  const VmaDefragmentationInfo *pDefragmentationInfo,
19220  VmaDefragmentationStats* pDefragmentationStats)
19221 {
19222  // Deprecated interface, reimplemented using new one.
19223 
19224  VmaDefragmentationInfo2 info2 = {};
19225  info2.allocationCount = (uint32_t)allocationCount;
19226  info2.pAllocations = pAllocations;
19227  info2.pAllocationsChanged = pAllocationsChanged;
19228  if(pDefragmentationInfo != VMA_NULL)
19229  {
19230  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
19231  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
19232  }
19233  else
19234  {
19235  info2.maxCpuAllocationsToMove = UINT32_MAX;
19236  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
19237  }
19238  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
19239 
19240  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
19241  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
19242  if(res == VK_NOT_READY)
19243  {
19244  res = vmaDefragmentationEnd(allocator, ctx);
19245  }
19246  return res;
19247 }
19248 
19249 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
19250  VmaAllocator allocator,
19251  const VmaDefragmentationInfo2* pInfo,
19252  VmaDefragmentationStats* pStats,
19253  VmaDefragmentationContext *pContext)
19254 {
19255  VMA_ASSERT(allocator && pInfo && pContext);
19256 
19257  // Degenerate case: Nothing to defragment.
19258  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
19259  {
19260  return VK_SUCCESS;
19261  }
19262 
19263  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
19264  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
19265  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
19266  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
19267 
19268  VMA_DEBUG_LOG("vmaDefragmentationBegin");
19269 
19270  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19271 
19272  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
19273 
19274 #if VMA_RECORDING_ENABLED
19275  if(allocator->GetRecorder() != VMA_NULL)
19276  {
19277  allocator->GetRecorder()->RecordDefragmentationBegin(
19278  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
19279  }
19280 #endif
19281 
19282  return res;
19283 }
19284 
19285 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
19286  VmaAllocator allocator,
19287  VmaDefragmentationContext context)
19288 {
19289  VMA_ASSERT(allocator);
19290 
19291  VMA_DEBUG_LOG("vmaDefragmentationEnd");
19292 
19293  if(context != VK_NULL_HANDLE)
19294  {
19295  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19296 
19297 #if VMA_RECORDING_ENABLED
19298  if(allocator->GetRecorder() != VMA_NULL)
19299  {
19300  allocator->GetRecorder()->RecordDefragmentationEnd(
19301  allocator->GetCurrentFrameIndex(), context);
19302  }
19303 #endif
19304 
19305  return allocator->DefragmentationEnd(context);
19306  }
19307  else
19308  {
19309  return VK_SUCCESS;
19310  }
19311 }
19312 
19313 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
19314  VmaAllocator allocator,
19315  VmaDefragmentationContext context,
19316  VmaDefragmentationPassInfo* pInfo
19317  )
19318 {
19319  VMA_ASSERT(allocator);
19320  VMA_ASSERT(pInfo);
19321 
19322  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
19323 
19324  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19325 
19326  if(context == VK_NULL_HANDLE)
19327  {
19328  pInfo->moveCount = 0;
19329  return VK_SUCCESS;
19330  }
19331 
19332  return allocator->DefragmentationPassBegin(pInfo, context);
19333 }
19334 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
19335  VmaAllocator allocator,
19336  VmaDefragmentationContext context)
19337 {
19338  VMA_ASSERT(allocator);
19339 
19340  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
19341  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19342 
19343  if(context == VK_NULL_HANDLE)
19344  return VK_SUCCESS;
19345 
19346  return allocator->DefragmentationPassEnd(context);
19347 }
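// Usage sketch (editor's illustration) of a single incremental pass; it
// assumes the context was begun with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set
// in VmaDefragmentationInfo2::flags:
//
//   VmaDefragmentationPassInfo passInfo = {};
//   vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
//   // Copy the regions described by passInfo.pMoves[0 .. passInfo.moveCount)
//   // to their new memory/offset, e.g. via submitted copy commands, then:
//   vmaEndDefragmentationPass(allocator, defragCtx);
//   // Repeat passes as needed, then finish with vmaDefragmentationEnd().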
19348 
19349 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
19350  VmaAllocator allocator,
19351  VmaAllocation allocation,
19352  VkBuffer buffer)
19353 {
19354  VMA_ASSERT(allocator && allocation && buffer);
19355 
19356  VMA_DEBUG_LOG("vmaBindBufferMemory");
19357 
19358  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19359 
19360  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
19361 }
19362 
19363 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
19364  VmaAllocator allocator,
19365  VmaAllocation allocation,
19366  VkDeviceSize allocationLocalOffset,
19367  VkBuffer buffer,
19368  const void* pNext)
19369 {
19370  VMA_ASSERT(allocator && allocation && buffer);
19371 
19372  VMA_DEBUG_LOG("vmaBindBufferMemory2");
19373 
19374  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19375 
19376  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
19377 }
19378 
19379 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
19380  VmaAllocator allocator,
19381  VmaAllocation allocation,
19382  VkImage image)
19383 {
19384  VMA_ASSERT(allocator && allocation && image);
19385 
19386  VMA_DEBUG_LOG("vmaBindImageMemory");
19387 
19388  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19389 
19390  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
19391 }
19392 
19393 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
19394  VmaAllocator allocator,
19395  VmaAllocation allocation,
19396  VkDeviceSize allocationLocalOffset,
19397  VkImage image,
19398  const void* pNext)
19399 {
19400  VMA_ASSERT(allocator && allocation && image);
19401 
19402  VMA_DEBUG_LOG("vmaBindImageMemory2");
19403 
19404  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19405 
19406  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
19407 }
19408 
19409 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
19410  VmaAllocator allocator,
19411  const VkBufferCreateInfo* pBufferCreateInfo,
19412  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19413  VkBuffer* pBuffer,
19414  VmaAllocation* pAllocation,
19415  VmaAllocationInfo* pAllocationInfo)
19416 {
19417  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
19418 
19419  if(pBufferCreateInfo->size == 0)
19420  {
19421  return VK_ERROR_VALIDATION_FAILED_EXT;
19422  }
19423  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
19424  !allocator->m_UseKhrBufferDeviceAddress)
19425  {
19426  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
19427  return VK_ERROR_VALIDATION_FAILED_EXT;
19428  }
19429 
19430  VMA_DEBUG_LOG("vmaCreateBuffer");
19431 
19432  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19433 
19434  *pBuffer = VK_NULL_HANDLE;
19435  *pAllocation = VK_NULL_HANDLE;
19436 
19437  // 1. Create VkBuffer.
19438  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
19439  allocator->m_hDevice,
19440  pBufferCreateInfo,
19441  allocator->GetAllocationCallbacks(),
19442  pBuffer);
19443  if(res >= 0)
19444  {
19445  // 2. vkGetBufferMemoryRequirements.
19446  VkMemoryRequirements vkMemReq = {};
19447  bool requiresDedicatedAllocation = false;
19448  bool prefersDedicatedAllocation = false;
19449  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
19450  requiresDedicatedAllocation, prefersDedicatedAllocation);
19451 
19452  // 3. Allocate memory using allocator.
19453  res = allocator->AllocateMemory(
19454  vkMemReq,
19455  requiresDedicatedAllocation,
19456  prefersDedicatedAllocation,
19457  *pBuffer, // dedicatedBuffer
19458  pBufferCreateInfo->usage, // dedicatedBufferUsage
19459  VK_NULL_HANDLE, // dedicatedImage
19460  *pAllocationCreateInfo,
19461  VMA_SUBALLOCATION_TYPE_BUFFER,
19462  1, // allocationCount
19463  pAllocation);
19464 
19465 #if VMA_RECORDING_ENABLED
19466  if(allocator->GetRecorder() != VMA_NULL)
19467  {
19468  allocator->GetRecorder()->RecordCreateBuffer(
19469  allocator->GetCurrentFrameIndex(),
19470  *pBufferCreateInfo,
19471  *pAllocationCreateInfo,
19472  *pAllocation);
19473  }
19474 #endif
19475 
19476  if(res >= 0)
19477  {
19478  // 4. Bind buffer with memory.
19479  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19480  {
19481  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
19482  }
19483  if(res >= 0)
19484  {
19485  // All steps succeeded.
19486  #if VMA_STATS_STRING_ENABLED
19487  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19488  #endif
19489  if(pAllocationInfo != VMA_NULL)
19490  {
19491  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19492  }
19493 
19494  return VK_SUCCESS;
19495  }
19496  allocator->FreeMemory(
19497  1, // allocationCount
19498  pAllocation);
19499  *pAllocation = VK_NULL_HANDLE;
19500  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19501  *pBuffer = VK_NULL_HANDLE;
19502  return res;
19503  }
19504  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19505  *pBuffer = VK_NULL_HANDLE;
19506  return res;
19507  }
19508  return res;
19509 }
19510 
19511 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
19512  VmaAllocator allocator,
19513  const VkBufferCreateInfo* pBufferCreateInfo,
19514  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19515  VkDeviceSize minAlignment,
19516  VkBuffer* pBuffer,
19517  VmaAllocation* pAllocation,
19518  VmaAllocationInfo* pAllocationInfo)
19519 {
19520  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
19521 
19522  if(pBufferCreateInfo->size == 0)
19523  {
19524  return VK_ERROR_VALIDATION_FAILED_EXT;
19525  }
19526  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
19527  !allocator->m_UseKhrBufferDeviceAddress)
19528  {
19529  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
19530  return VK_ERROR_VALIDATION_FAILED_EXT;
19531  }
19532 
19533  VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
19534 
19535  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19536 
19537  *pBuffer = VK_NULL_HANDLE;
19538  *pAllocation = VK_NULL_HANDLE;
19539 
19540  // 1. Create VkBuffer.
19541  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
19542  allocator->m_hDevice,
19543  pBufferCreateInfo,
19544  allocator->GetAllocationCallbacks(),
19545  pBuffer);
19546  if(res >= 0)
19547  {
19548  // 2. vkGetBufferMemoryRequirements.
19549  VkMemoryRequirements vkMemReq = {};
19550  bool requiresDedicatedAllocation = false;
19551  bool prefersDedicatedAllocation = false;
19552  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
19553  requiresDedicatedAllocation, prefersDedicatedAllocation);
19554 
19555  // 2a. Include minAlignment
19556  vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
19557 
19558  // 3. Allocate memory using allocator.
19559  res = allocator->AllocateMemory(
19560  vkMemReq,
19561  requiresDedicatedAllocation,
19562  prefersDedicatedAllocation,
19563  *pBuffer, // dedicatedBuffer
19564  pBufferCreateInfo->usage, // dedicatedBufferUsage
19565  VK_NULL_HANDLE, // dedicatedImage
19566  *pAllocationCreateInfo,
19567  VMA_SUBALLOCATION_TYPE_BUFFER,
19568  1, // allocationCount
19569  pAllocation);
19570 
19571 #if VMA_RECORDING_ENABLED
19572  if(allocator->GetRecorder() != VMA_NULL)
19573  {
19574  VMA_ASSERT(0 && "Not implemented.");
19575  }
19576 #endif
19577 
19578  if(res >= 0)
19579  {
19580  // 4. Bind buffer with memory.
19581  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19582  {
19583  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
19584  }
19585  if(res >= 0)
19586  {
19587  // All steps succeeded.
19588  #if VMA_STATS_STRING_ENABLED
19589  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19590  #endif
19591  if(pAllocationInfo != VMA_NULL)
19592  {
19593  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19594  }
19595 
19596  return VK_SUCCESS;
19597  }
19598  allocator->FreeMemory(
19599  1, // allocationCount
19600  pAllocation);
19601  *pAllocation = VK_NULL_HANDLE;
19602  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19603  *pBuffer = VK_NULL_HANDLE;
19604  return res;
19605  }
19606  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19607  *pBuffer = VK_NULL_HANDLE;
19608  return res;
19609  }
19610  return res;
19611 }
19612 
19613 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
19614  VmaAllocator allocator,
19615  VkBuffer buffer,
19616  VmaAllocation allocation)
19617 {
19618  VMA_ASSERT(allocator);
19619 
19620  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19621  {
19622  return;
19623  }
19624 
19625  VMA_DEBUG_LOG("vmaDestroyBuffer");
19626 
19627  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19628 
19629 #if VMA_RECORDING_ENABLED
19630  if(allocator->GetRecorder() != VMA_NULL)
19631  {
19632  allocator->GetRecorder()->RecordDestroyBuffer(
19633  allocator->GetCurrentFrameIndex(),
19634  allocation);
19635  }
19636 #endif
19637 
19638  if(buffer != VK_NULL_HANDLE)
19639  {
19640  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
19641  }
19642 
19643  if(allocation != VK_NULL_HANDLE)
19644  {
19645  allocator->FreeMemory(
19646  1, // allocationCount
19647  &allocation);
19648  }
19649 }
19650 
19651 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
19652  VmaAllocator allocator,
19653  const VkImageCreateInfo* pImageCreateInfo,
19654  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19655  VkImage* pImage,
19656  VmaAllocation* pAllocation,
19657  VmaAllocationInfo* pAllocationInfo)
19658 {
19659  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
19660 
19661  if(pImageCreateInfo->extent.width == 0 ||
19662  pImageCreateInfo->extent.height == 0 ||
19663  pImageCreateInfo->extent.depth == 0 ||
19664  pImageCreateInfo->mipLevels == 0 ||
19665  pImageCreateInfo->arrayLayers == 0)
19666  {
19667  return VK_ERROR_VALIDATION_FAILED_EXT;
19668  }
19669 
19670  VMA_DEBUG_LOG("vmaCreateImage");
19671 
19672  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19673 
19674  *pImage = VK_NULL_HANDLE;
19675  *pAllocation = VK_NULL_HANDLE;
19676 
19677  // 1. Create VkImage.
19678  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
19679  allocator->m_hDevice,
19680  pImageCreateInfo,
19681  allocator->GetAllocationCallbacks(),
19682  pImage);
19683  if(res >= 0)
19684  {
19685  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
19686  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
19687  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
19688 
19689  // 2. Allocate memory using allocator.
19690  VkMemoryRequirements vkMemReq = {};
19691  bool requiresDedicatedAllocation = false;
19692  bool prefersDedicatedAllocation = false;
19693  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
19694  requiresDedicatedAllocation, prefersDedicatedAllocation);
19695 
19696  res = allocator->AllocateMemory(
19697  vkMemReq,
19698  requiresDedicatedAllocation,
19699  prefersDedicatedAllocation,
19700  VK_NULL_HANDLE, // dedicatedBuffer
19701  UINT32_MAX, // dedicatedBufferUsage
19702  *pImage, // dedicatedImage
19703  *pAllocationCreateInfo,
19704  suballocType,
19705  1, // allocationCount
19706  pAllocation);
19707 
19708 #if VMA_RECORDING_ENABLED
19709  if(allocator->GetRecorder() != VMA_NULL)
19710  {
19711  allocator->GetRecorder()->RecordCreateImage(
19712  allocator->GetCurrentFrameIndex(),
19713  *pImageCreateInfo,
19714  *pAllocationCreateInfo,
19715  *pAllocation);
19716  }
19717 #endif
19718 
19719  if(res >= 0)
19720  {
19721  // 3. Bind image with memory.
19722  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19723  {
19724  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
19725  }
19726  if(res >= 0)
19727  {
19728  // All steps succeeded.
19729  #if VMA_STATS_STRING_ENABLED
19730  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
19731  #endif
19732  if(pAllocationInfo != VMA_NULL)
19733  {
19734  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19735  }
19736 
19737  return VK_SUCCESS;
19738  }
19739  allocator->FreeMemory(
19740  1, // allocationCount
19741  pAllocation);
19742  *pAllocation = VK_NULL_HANDLE;
19743  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19744  *pImage = VK_NULL_HANDLE;
19745  return res;
19746  }
19747  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19748  *pImage = VK_NULL_HANDLE;
19749  return res;
19750  }
19751  return res;
19752 }
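// Usage sketch (editor's illustration), mirroring the buffer example above:
//
//   VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//   imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//   imgCreateInfo.extent = { 1024, 1024, 1 };
//   imgCreateInfo.mipLevels = 1;
//   imgCreateInfo.arrayLayers = 1;
//   imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//   imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//   imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//   imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//   imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//   VkImage img = VK_NULL_HANDLE;
//   VmaAllocation alloc = VK_NULL_HANDLE;
//   VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
//       &img, &alloc, VMA_NULL);
//   // ...
//   vmaDestroyImage(allocator, img, alloc);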
19753 
19754 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
19755  VmaAllocator allocator,
19756  VkImage image,
19757  VmaAllocation allocation)
19758 {
19759  VMA_ASSERT(allocator);
19760 
19761  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19762  {
19763  return;
19764  }
19765 
19766  VMA_DEBUG_LOG("vmaDestroyImage");
19767 
19768  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19769 
19770 #if VMA_RECORDING_ENABLED
19771  if(allocator->GetRecorder() != VMA_NULL)
19772  {
19773  allocator->GetRecorder()->RecordDestroyImage(
19774  allocator->GetCurrentFrameIndex(),
19775  allocation);
19776  }
19777 #endif
19778 
19779  if(image != VK_NULL_HANDLE)
19780  {
19781  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
19782  }
19783  if(allocation != VK_NULL_HANDLE)
19784  {
19785  allocator->FreeMemory(
19786  1, // allocationCount
19787  &allocation);
19788  }
19789 }
19790 
19791 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:2897
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2923
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2929
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2915
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2936
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2910
float priority
A floating-point value between 0 and 1, indicating the priority of the allocation relative to other m...
Definition: vk_mem_alloc.h:2943
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2905
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2899
Represents single memory allocation.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:3264
VkDeviceSize offset
Offset in VkDeviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:3288
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:3308
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:3269
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:3299
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:3313
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:3278
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:2419
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2424
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2450
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2475
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2421
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null.
Definition: vk_mem_alloc.h:2481
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2433
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2493
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2430
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2488
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2427
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2502
const VkExternalMemoryHandleTypeFlagsKHR * pTypeExternalMemoryHandleTypes
Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
Definition: vk_mem_alloc.h:2513
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2436
Represents main object of this library initialized.
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:2529
VkDevice device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:2544
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2534
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2539
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2635
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2638
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2649
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2659
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2670
Represents Opaque object that represents started defragmentation process.
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3663
const VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3703
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3669
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3723
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3718
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3666
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3684
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3687
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3732
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3713
const VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3678
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3708
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3754
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3764
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3759
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3745
uint32_t moveCount
Definition: vk_mem_alloc.h:3746
VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3747
Definition: vk_mem_alloc.h:3735
VkDeviceMemory memory
Definition: vk_mem_alloc.h:3737
VkDeviceSize offset
Definition: vk_mem_alloc.h:3738
VmaAllocation allocation
Definition: vk_mem_alloc.h:3736
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3768
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3776
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3770
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3772
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3774
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:2228
void * pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:2234
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:2230
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:2232
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:3065
float priority
A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relat...
Definition: vk_mem_alloc.h:3113
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:3068
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:3071
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:3107
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:3080
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:3085
VkDeviceSize minAllocationAlignment
Additional minimum alignment to be used for all allocations created from this pool....
Definition: vk_mem_alloc.h:3120
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:3093
void * pMemoryAllocateNext
Additional pNext chain to be attached to VkMemoryAllocateInfo used for every allocation made by this ...
Definition: vk_mem_alloc.h:3130
Represents custom memory pool.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:3135
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:3138
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:3157
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:3154
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:3144
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:3141
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:3147
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:2404
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2414
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2406
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2596
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2607
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2607
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2606
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2608
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2600
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2608
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2604
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2598
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2607
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2602
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2608
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2613
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2615
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2614
VmaStatInfo total
Definition: vk_mem_alloc.h:2616
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2358
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2368
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2373
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:2361
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2365
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2370
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2362
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2369
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2366
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2360
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:2359
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2372
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2374
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2367
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:2363
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:2364
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:2375
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2371
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:2214
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:2026
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
struct VmaStats VmaStats
General statistics from current state of Allocator.
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:3061
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:2390
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2398
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:2396
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:2238
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2313
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:2243
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:2295
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Definition: vk_mem_alloc.h:2331
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:2283
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:2268
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2350
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
Definition: vk_mem_alloc.h:2348
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2894
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
void vmaFreeMemory(VmaAllocator allocator, const VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin().
Definition: vk_mem_alloc.h:3653
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3654
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3655
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from current frame.
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:2207
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, const VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current frame.
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3657
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:3005
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3040
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3059
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3051
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
Definition: vk_mem_alloc.h:3023
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:3055
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkResult vmaDefragment(VmaAllocator allocator, const VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
VkResult vmaCreateBufferWithAlignment(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkDeviceSize minAlignment, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Creates a buffer with additional minimum alignment.
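A sketch forcing at least 256-byte alignment (an arbitrary example value):

VkBufferCreateInfo bci = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bci.size = 4096;
bci.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo aci = {};
aci.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBufferWithAlignment(allocator, &bci, &aci, 256, &buf, &alloc, NULL);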
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
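The returned string must be released with vmaFreeStatsString(); a sketch:

char* statsString = NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map of allocations
// ... dump the JSON in statsString to a log or file ...
vmaFreeStatsString(allocator, statsString);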
VmaMemoryUsage
Definition: vk_mem_alloc.h:2718
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2781
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2749
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2771
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2765
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2779
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2756
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2739
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2722
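Illustrative, commonly used picks for VmaAllocationCreateInfo::usage (guidance, not a rule):

VmaAllocationCreateInfo gpuOnly = {};  // render targets, static vertex/index buffers
gpuOnly.usage = VMA_MEMORY_USAGE_GPU_ONLY;
VmaAllocationCreateInfo staging = {};  // CPU-filled staging buffers for uploads
staging.usage = VMA_MEMORY_USAGE_CPU_ONLY;
VmaAllocationCreateInfo dynamic = {};  // per-frame data written by CPU, read by GPU
dynamic.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
VmaAllocationCreateInfo readback = {}; // GPU-written data read back on CPU
readback.usage = VMA_MEMORY_USAGE_GPU_TO_CPU;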
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VkResult vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
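When reading GPU-written data through a mapped pointer on a memory type without HOST_COHERENT, invalidate first (a sketch; allocation is assumed mapped):

vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
// Subsequent CPU reads of the mapped pointer observe the GPU's writes.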
struct VmaAllocatorCreateInfo VmaAllocatorCreateInfo
Description of an Allocator to be created.
VkResult vmaInvalidateAllocations(VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation *allocations, const VkDeviceSize *offsets, const VkDeviceSize *sizes)
Invalidates memory of given set of allocations.
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
VkResult vmaFlushAllocations(VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation *allocations, const VkDeviceSize *offsets, const VkDeviceSize *sizes)
Flushes memory of given set of allocations.
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
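The usual create-buffer-plus-allocation sketch; the size and usage flags here are arbitrary examples:

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 65536;
bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buffer;
VmaAllocation allocation;
VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);
// Pair with vmaDestroyBuffer(allocator, buffer, allocation).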
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2352
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by given allocation and returns pointer to it.
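A sketch of the usual map/write/unmap sequence for a host-visible allocation; srcData/srcSize are placeholders:

void* pData = NULL;
if (vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize);
    vmaUnmapMemory(allocator, allocation);
    // Only needed when the memory type lacks HOST_COHERENT:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
}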
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2785
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2880
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped and retrieve a pointer to it (see the sketch after this list).
Definition: vk_mem_alloc.h:2816
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2853
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2873
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2792
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2847
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2829
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2883
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2836
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2862
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:2803
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2877
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2887
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2842
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2857
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2866
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2892
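A persistently mapped buffer via VMA_ALLOCATION_CREATE_MAPPED_BIT, as a sketch; MyUniforms/myUniforms are placeholder names:

VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufInfo.size = sizeof(MyUniforms);
bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
// allocInfo.pMappedData stays valid for the allocation's lifetime; no vmaMapMemory() needed.
memcpy(allocInfo.pMappedData, &myUniforms, sizeof(MyUniforms));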
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets name of a custom pool.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets index of the current frame.
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates new allocation that is in lost state from the beginning.
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
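A sketch that queries the memory type a hypothetical uniform buffer would land in, e.g. to seed VmaPoolCreateInfo::memoryTypeIndex:

VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = 1024;
bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

VmaAllocationCreateInfo allocInfo = {};
allocInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

uint32_t memoryTypeIndex;
VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufferInfo, &allocInfo, &memoryTypeIndex);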
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of existing VmaPool object.
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about current memory budget for all memory heaps.
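pBudget must point to an array with one entry per memory heap; a sketch using vmaGetMemoryProperties() for the heap count:

const VkPhysicalDeviceMemoryProperties* memProps = NULL;
vmaGetMemoryProperties(allocator, &memProps);

VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetBudget(allocator, budgets); // fills one entry per heap
for (uint32_t i = 0; i < memProps->memoryHeapCount; ++i)
{
    // budgets[i].usage = bytes currently in use; budgets[i].budget = estimated safe-to-use limit
}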
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in entire allocator.
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves name of a custom pool.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2400
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in given allocation to new value.
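A sketch; unless VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT was used when the allocation was created, the pointer is stored as-is and must outlive the allocation:

vmaSetAllocationUserData(allocator, allocation, myResourcePtr); // retrieve later via VmaAllocationInfo::pUserData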
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about existing VmaAllocator object - handle to Vulkan device etc.