//
// Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
    #define NOMINMAX // For windows.h
#endif

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

// Define this macro to declare the maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use a version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
#if !defined(VMA_MEMORY_PRIORITY)
    #if VK_EXT_memory_priority
        #define VMA_MEMORY_PRIORITY 1
    #else
        #define VMA_MEMORY_PRIORITY 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to, if they are not null.
//
// The length may be one of:
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example,
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
//   means the number of memory heaps available in the device associated
//   with the VmaAllocator being dealt with.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers, then we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);

typedef struct VmaDeviceMemoryCallbacks {
    /// Optional, can be null.
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    /// Optional, can be null.
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    /// Optional, can be null.
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;

typedef enum VmaAllocatorCreateFlagBits {
    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
    VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
    VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
    VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
    VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,

    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocatorCreateFlagBits;
typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;

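/*
Illustrative sketch (not from the original documentation): filling
VmaVulkanFunctions by hand when automatic fetching is disabled. It assumes the
statically linked Vulkan 1.0 prototypes are available:

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = vkFreeMemory;
    vulkanFunctions.vkMapMemory = vkMapMemory;
    vulkanFunctions.vkUnmapMemory = vkUnmapMemory;
    // ...assign the remaining members analogously, then pass the struct via
    // VmaAllocatorCreateInfo::pVulkanFunctions.
*/
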
typedef enum VmaRecordFlagBits {
    /// Enables flush after recording every function call.
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    /// Flags for recording. Use #VmaRecordFlagBits enum.
    VmaRecordFlags flags;
    /// Path to the file that should be written by the recording.
    const char* VMA_NOT_NULL pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
    VmaAllocatorCreateFlags flags;
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    VkDevice VMA_NOT_NULL device;
    VkDeviceSize preferredLargeHeapBlockSize;
    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
    uint32_t frameInUseCount;
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
    const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
    VkInstance VMA_NOT_NULL instance;
    uint32_t vulkanApiVersion;
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator VMA_NULLABLE allocator);

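/*
Minimal usage sketch (illustrative; `physicalDevice`, `device`, and `instance`
are assumed to be valid handles created by the application):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ...use the allocator...
    vmaDestroyAllocator(allocator);
*/
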
typedef struct VmaAllocatorInfo
{
    VkInstance VMA_NOT_NULL instance;
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaStats* VMA_NOT_NULL pStats);

typedef struct VmaBudget
{
    VkDeviceSize blockBytes;

    VkDeviceSize allocationBytes;

    VkDeviceSize usage;

    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaBudget* VMA_NOT_NULL pBudget);

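/*
Usage sketch (illustrative; assumes a valid `allocator`; pBudget must point to
an array with one element per memory heap of the physical device):

    VmaBudget budget[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budget);
    // Comparing budget[heapIndex].usage against budget[heapIndex].budget tells
    // how close heap `heapIndex` is to the limit estimated by the OS/driver.
*/
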
#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE pStatsString);

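/*
Typical usage sketch (illustrative; assumes a valid `allocator`):

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ...write statsString (JSON) to a file or log...
    vmaFreeStatsString(allocator, statsString);
*/
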
#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef enum VmaAllocationCreateFlagBits {
    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,

    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
        VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,

    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaAllocationCreateFlagBits;
typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    /// Use #VmaAllocationCreateFlagBits enum.
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool VMA_NULLABLE pool;
    void* VMA_NULLABLE pUserData;
    float priority;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

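/*
Sketch of choosing a memory type up front (illustrative; assumes a valid
`allocator`; the buffer parameters are arbitrary examples):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
    // memTypeIndex can now be used e.g. in VmaPoolCreateInfo::memoryTypeIndex.
*/
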
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    /// Use #VmaPoolCreateFlagBits enum.
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
    float priority;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);

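/*
Pool usage sketch (illustrative; assumes a valid `allocator` and a
`memTypeIndex` obtained e.g. from vmaFindMemoryTypeIndexForBufferInfo):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 16ull * 1024 * 1024; // optional; 0 = default

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // ...set VmaAllocationCreateInfo::pool = pool for allocations...
    vmaDestroyPool(allocator, pool);
*/
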
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NULLABLE pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    VmaPoolStats* VMA_NOT_NULL pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    size_t* VMA_NULLABLE pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE * VMA_NOT_NULL ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE pName);

VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* VMA_NULLABLE pMappedData;
    void* VMA_NULLABLE pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
    size_t allocationCount,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    size_t allocationCount,
    const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize newSize);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE * VMA_NOT_NULL ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

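/*
Map/unmap sketch (illustrative; assumes a valid `allocator`, a host-visible
`allocation`, and `myData`/`myDataSize` provided by the application):

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, allocation, &mappedData);
    memcpy(mappedData, myData, myDataSize);
    vmaUnmapMemory(allocator, allocation);
    // For non-HOST_COHERENT memory, also flush the written range afterwards:
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
*/
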
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
    uint32_t poolCount;
    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer VMA_NULLABLE commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation VMA_NOT_NULL allocation;
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
} VmaDefragmentationPassInfo;

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
    VmaDefragmentationStats* VMA_NULLABLE pStats,
    VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context,
    VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context
);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    size_t allocationCount,
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
    const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
    VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);

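/*
CPU-side defragmentation sketch using the vmaDefragmentationBegin/End pair
(illustrative; `allocations` is assumed to be an array of `allocCount`
VmaAllocation handles owned by the application):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to moved allocations must then be recreated and rebound.
*/
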
VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

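/*
Buffer creation sketch (illustrative; assumes a valid `allocator`; sizes and
usage flags are arbitrary examples):

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, NULL);
    // ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
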
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
    VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
    VmaAllocation VMA_NULLABLE allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdio> // for snprintf, used when VMA_STATS_STRING_ENABLED
#include <cstdlib>
#include <cstring>
#include <utility>

#if VMA_RECORDING_ENABLED
    #include <chrono>
    #if defined(_WIN32)
        #include <windows.h>
    #else
        #include <sstream>
        #include <thread>
    #endif
#endif

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here, if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    #define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
*/
#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
    #if defined(VK_NO_PROTOTYPES)
        extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
        extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    #endif
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own implementation of
the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though std::shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
The library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif

static void* vma_aligned_alloc(size_t alignment, size_t size)
{
#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    // MAC_OS_X_VERSION_10_16), even though the function is marked
    // available for 10.15. That's why the preprocessor checks for 10.16 but
    // the __builtin_available checks for 10.15.
    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
    if (__builtin_available(macOS 10.15, iOS 13, *))
        return aligned_alloc(alignment, size);
#endif
#endif
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#elif defined(_WIN32)
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return _aligned_malloc(size, alignment);
}
#else
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return aligned_alloc(alignment, size);
}
#endif

#if defined(_WIN32)
static void vma_aligned_free(void* ptr)
{
    _aligned_free(ptr);
}
#else
static void vma_aligned_free(void* ptr)
{
    free(ptr);
}
#endif

// If your compiler is not compatible with C++11 and the definition of
// aligned_alloc() is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, e.g. inside data structures such as operator[].
// Making it non-empty can noticeably slow down the program.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
#endif

#ifndef VMA_SYSTEM_ALIGNED_FREE
    // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
    #if defined(VMA_SYSTEM_FREE)
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
    #else
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif


#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    /// Maximum size of a memory heap in Vulkan to consider it "small".
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

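// The popcount below uses the classic SWAR (SIMD-within-a-register) technique:
// each step sums the bit counts of adjacent groups (1-bit, then 2-bit, 4-bit,
// 8-bit, 16-bit) in parallel within the 32-bit word. Illustrative trace (not
// from the original comments): for v = 0b1011, the pairwise counts are 01 and
// 10, which then sum to the final answer 3.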
// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return (val + alignment - 1) & ~(alignment - 1);
}
// Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return val & ~(alignment - 1);
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

/*
Returns true if all pointers in the array are not-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}

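// Usage note (illustrative, not from the original comments): given
//     VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
//     VkMemoryDedicatedAllocateInfoKHR dedicatedInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
//     VmaPnextChainPushFront(&allocInfo, &dedicatedInfo);
// dedicatedInfo becomes the first extension struct in allocInfo's pNext chain.
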
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    void* result = VMA_NULL;
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        result = (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
    return result;
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_ALIGNED_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with an interface compatible with a subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
4995 template<typename T, typename AllocatorT>
4996 class VmaVector
4997 {
4998 public:
4999  typedef T value_type;
5000 
5001  VmaVector(const AllocatorT& allocator) :
5002  m_Allocator(allocator),
5003  m_pArray(VMA_NULL),
5004  m_Count(0),
5005  m_Capacity(0)
5006  {
5007  }
5008 
5009  VmaVector(size_t count, const AllocatorT& allocator) :
5010  m_Allocator(allocator),
5011  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
5012  m_Count(count),
5013  m_Capacity(count)
5014  {
5015  }
5016 
5017  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
5018  // value is unused.
5019  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
5020  : VmaVector(count, allocator) {}
5021 
5022  VmaVector(const VmaVector<T, AllocatorT>& src) :
5023  m_Allocator(src.m_Allocator),
5024  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
5025  m_Count(src.m_Count),
5026  m_Capacity(src.m_Count)
5027  {
5028  if(m_Count != 0)
5029  {
5030  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
5031  }
5032  }
5033 
5034  ~VmaVector()
5035  {
5036  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5037  }
5038 
5039  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
5040  {
5041  if(&rhs != this)
5042  {
5043  resize(rhs.m_Count);
5044  if(m_Count != 0)
5045  {
5046  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
5047  }
5048  }
5049  return *this;
5050  }
5051 
5052  bool empty() const { return m_Count == 0; }
5053  size_t size() const { return m_Count; }
5054  T* data() { return m_pArray; }
5055  const T* data() const { return m_pArray; }
5056 
5057  T& operator[](size_t index)
5058  {
5059  VMA_HEAVY_ASSERT(index < m_Count);
5060  return m_pArray[index];
5061  }
5062  const T& operator[](size_t index) const
5063  {
5064  VMA_HEAVY_ASSERT(index < m_Count);
5065  return m_pArray[index];
5066  }
5067 
5068  T& front()
5069  {
5070  VMA_HEAVY_ASSERT(m_Count > 0);
5071  return m_pArray[0];
5072  }
5073  const T& front() const
5074  {
5075  VMA_HEAVY_ASSERT(m_Count > 0);
5076  return m_pArray[0];
5077  }
5078  T& back()
5079  {
5080  VMA_HEAVY_ASSERT(m_Count > 0);
5081  return m_pArray[m_Count - 1];
5082  }
5083  const T& back() const
5084  {
5085  VMA_HEAVY_ASSERT(m_Count > 0);
5086  return m_pArray[m_Count - 1];
5087  }
5088 
5089  void reserve(size_t newCapacity, bool freeMemory = false)
5090  {
5091  newCapacity = VMA_MAX(newCapacity, m_Count);
5092 
5093  if((newCapacity < m_Capacity) && !freeMemory)
5094  {
5095  newCapacity = m_Capacity;
5096  }
5097 
5098  if(newCapacity != m_Capacity)
5099  {
5100  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5101  if(m_Count != 0)
5102  {
5103  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5104  }
5105  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5106  m_Capacity = newCapacity;
5107  m_pArray = newArray;
5108  }
5109  }
5110 
5111  void resize(size_t newCount, bool freeMemory = false)
5112  {
5113  size_t newCapacity = m_Capacity;
5114  if(newCount > m_Capacity)
5115  {
5116  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
5117  }
5118  else if(freeMemory)
5119  {
5120  newCapacity = newCount;
5121  }
5122 
5123  if(newCapacity != m_Capacity)
5124  {
5125  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5126  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
5127  if(elementsToCopy != 0)
5128  {
5129  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
5130  }
5131  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5132  m_Capacity = newCapacity;
5133  m_pArray = newArray;
5134  }
5135 
5136  m_Count = newCount;
5137  }
5138 
5139  void clear(bool freeMemory = false)
5140  {
5141  resize(0, freeMemory);
5142  }
5143 
5144  void insert(size_t index, const T& src)
5145  {
5146  VMA_HEAVY_ASSERT(index <= m_Count);
5147  const size_t oldCount = size();
5148  resize(oldCount + 1);
5149  if(index < oldCount)
5150  {
5151  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
5152  }
5153  m_pArray[index] = src;
5154  }
5155 
5156  void remove(size_t index)
5157  {
5158  VMA_HEAVY_ASSERT(index < m_Count);
5159  const size_t oldCount = size();
5160  if(index < oldCount - 1)
5161  {
5162  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
5163  }
5164  resize(oldCount - 1);
5165  }
5166 
5167  void push_back(const T& src)
5168  {
5169  const size_t newIndex = size();
5170  resize(newIndex + 1);
5171  m_pArray[newIndex] = src;
5172  }
5173 
5174  void pop_back()
5175  {
5176  VMA_HEAVY_ASSERT(m_Count > 0);
5177  resize(size() - 1);
5178  }
5179 
5180  void push_front(const T& src)
5181  {
5182  insert(0, src);
5183  }
5184 
5185  void pop_front()
5186  {
5187  VMA_HEAVY_ASSERT(m_Count > 0);
5188  remove(0);
5189  }
5190 
5191  typedef T* iterator;
5192 
5193  iterator begin() { return m_pArray; }
5194  iterator end() { return m_pArray + m_Count; }
5195 
5196 private:
5197  AllocatorT m_Allocator;
5198  T* m_pArray;
5199  size_t m_Count;
5200  size_t m_Capacity;
5201 };
5202 
5203 template<typename T, typename allocatorT>
5204 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
5205 {
5206  vec.insert(index, item);
5207 }
5208 
5209 template<typename T, typename allocatorT>
5210 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
5211 {
5212  vec.remove(index);
5213 }
5214 
5215 #endif // #if VMA_USE_STL_VECTOR
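/*
Growth sketch for the custom VmaVector (illustrative): elements are moved with
memcpy/memmove and never constructed or destroyed, so T must stay trivially
copyable, and resize() grows capacity to max(newCount, capacity * 3 / 2, 8):

    VmaStlAllocator<uint32_t> a(callbacks); // callbacks assumed as above
    VmaVector<uint32_t, VmaStlAllocator<uint32_t> > v(a);
    v.push_back(1u);           // first growth allocates capacity 8
    VmaVectorInsert(v, 0, 2u); // memmove shifts the tail, then assigns
    VmaVectorRemove(v, 1);     // memmove closes the gap, then shrinks count
*/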
5216 
5217 template<typename CmpLess, typename VectorT>
5218 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
5219 {
5220  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5221  vector.data(),
5222  vector.data() + vector.size(),
5223  value,
5224  CmpLess()) - vector.data();
5225  VmaVectorInsert(vector, indexToInsert, value);
5226  return indexToInsert;
5227 }
5228 
5229 template<typename CmpLess, typename VectorT>
5230 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
5231 {
5232  CmpLess comparator;
5233  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
5234  vector.begin(),
5235  vector.end(),
5236  value,
5237  comparator);
5238  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5239  {
5240  size_t indexToRemove = it - vector.begin();
5241  VmaVectorRemove(vector, indexToRemove);
5242  return true;
5243  }
5244  return false;
5245 }
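/*
Sorted-vector sketch (illustrative): given a vector `vec` of VmaSuballocation
and an element `suballoc`, these helpers maintain ascending order using a
comparator such as VmaSuballocationOffsetLess, defined later in this file:

    VmaVectorInsertSorted<VmaSuballocationOffsetLess>(vec, suballoc);
    bool removed = VmaVectorRemoveSorted<VmaSuballocationOffsetLess>(vec, suballoc);

VmaVectorRemoveSorted treats "neither a < b nor b < a" as equality, so the
comparator must be a strict weak ordering over the stored keys.
*/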
5246 
5248 // class VmaSmallVector
5249 
5250 /*
5251 This is a vector (a variable-sized array), optimized for the case when the array is small.
5252 
5253 It contains some number of elements in-place, which allows it to avoid heap allocation
5254 when the actual number of elements is below that threshold. This allows normal "small"
5255 cases to be fast without losing generality for large inputs.
5256 */
5257 
5258 template<typename T, typename AllocatorT, size_t N>
5259 class VmaSmallVector
5260 {
5261 public:
5262  typedef T value_type;
5263 
5264  VmaSmallVector(const AllocatorT& allocator) :
5265  m_Count(0),
5266  m_DynamicArray(allocator)
5267  {
5268  }
5269  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5270  m_Count(count),
5271  m_DynamicArray(count > N ? count : 0, allocator)
5272  {
5273  }
5274  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5275  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5276  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5277  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5278 
5279  bool empty() const { return m_Count == 0; }
5280  size_t size() const { return m_Count; }
5281  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5282  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5283 
5284  T& operator[](size_t index)
5285  {
5286  VMA_HEAVY_ASSERT(index < m_Count);
5287  return data()[index];
5288  }
5289  const T& operator[](size_t index) const
5290  {
5291  VMA_HEAVY_ASSERT(index < m_Count);
5292  return data()[index];
5293  }
5294 
5295  T& front()
5296  {
5297  VMA_HEAVY_ASSERT(m_Count > 0);
5298  return data()[0];
5299  }
5300  const T& front() const
5301  {
5302  VMA_HEAVY_ASSERT(m_Count > 0);
5303  return data()[0];
5304  }
5305  T& back()
5306  {
5307  VMA_HEAVY_ASSERT(m_Count > 0);
5308  return data()[m_Count - 1];
5309  }
5310  const T& back() const
5311  {
5312  VMA_HEAVY_ASSERT(m_Count > 0);
5313  return data()[m_Count - 1];
5314  }
5315 
5316  void resize(size_t newCount, bool freeMemory = false)
5317  {
5318  if(newCount > N && m_Count > N)
5319  {
5320  // Any direction, staying in m_DynamicArray
5321  m_DynamicArray.resize(newCount, freeMemory);
5322  }
5323  else if(newCount > N && m_Count <= N)
5324  {
5325  // Growing, moving from m_StaticArray to m_DynamicArray
5326  m_DynamicArray.resize(newCount, freeMemory);
5327  if(m_Count > 0)
5328  {
5329  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5330  }
5331  }
5332  else if(newCount <= N && m_Count > N)
5333  {
5334  // Shrinking, moving from m_DynamicArray to m_StaticArray
5335  if(newCount > 0)
5336  {
5337  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5338  }
5339  m_DynamicArray.resize(0, freeMemory);
5340  }
5341  else
5342  {
5343  // Any direction, staying in m_StaticArray - nothing to do here
5344  }
5345  m_Count = newCount;
5346  }
5347 
5348  void clear(bool freeMemory = false)
5349  {
5350  m_DynamicArray.clear(freeMemory);
5351  m_Count = 0;
5352  }
5353 
5354  void insert(size_t index, const T& src)
5355  {
5356  VMA_HEAVY_ASSERT(index <= m_Count);
5357  const size_t oldCount = size();
5358  resize(oldCount + 1);
5359  T* const dataPtr = data();
5360  if(index < oldCount)
5361  {
5362  // Note: this could be optimized so that, when growing from m_StaticArray into m_DynamicArray, the memmove becomes a direct memcpy between the two arrays.
5363  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5364  }
5365  dataPtr[index] = src;
5366  }
5367 
5368  void remove(size_t index)
5369  {
5370  VMA_HEAVY_ASSERT(index < m_Count);
5371  const size_t oldCount = size();
5372  if(index < oldCount - 1)
5373  {
5374  // Note: this could be optimized so that, when shrinking from m_DynamicArray into m_StaticArray, the memmove becomes a direct memcpy between the two arrays.
5375  T* const dataPtr = data();
5376  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5377  }
5378  resize(oldCount - 1);
5379  }
5380 
5381  void push_back(const T& src)
5382  {
5383  const size_t newIndex = size();
5384  resize(newIndex + 1);
5385  data()[newIndex] = src;
5386  }
5387 
5388  void pop_back()
5389  {
5390  VMA_HEAVY_ASSERT(m_Count > 0);
5391  resize(size() - 1);
5392  }
5393 
5394  void push_front(const T& src)
5395  {
5396  insert(0, src);
5397  }
5398 
5399  void pop_front()
5400  {
5401  VMA_HEAVY_ASSERT(m_Count > 0);
5402  remove(0);
5403  }
5404 
5405  typedef T* iterator;
5406 
5407  iterator begin() { return data(); }
5408  iterator end() { return data() + m_Count; }
5409 
5410 private:
5411  size_t m_Count;
5412  T m_StaticArray[N]; // Used when m_Count <= N
5413  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
5414 };
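/*
Behavior sketch (illustrative): with N in-place slots, VmaSmallVector performs
no heap allocation until the element count exceeds N; resize() then memcpy-s
the contents into m_DynamicArray, and back into m_StaticArray when shrinking:

    VmaStlAllocator<uint32_t> a(callbacks); // callbacks assumed as above
    VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 4> sv(a);
    sv.push_back(1u); // stays in m_StaticArray
    sv.resize(8);     // spills to m_DynamicArray, copying the existing element
    sv.resize(2);     // copies the first 2 elements back to m_StaticArray
*/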
5415 
5417 // class VmaPoolAllocator
5418 
5419 /*
5420 Allocator for objects of type T using a list of arrays (pools) to speed up
5421 allocation. The number of elements that can be allocated is not bounded,
5422 because the allocator can create multiple blocks.
5423 */
5424 template<typename T>
5425 class VmaPoolAllocator
5426 {
5427  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5428 public:
5429  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5430  ~VmaPoolAllocator();
5431  template<typename... Types> T* Alloc(Types... args);
5432  void Free(T* ptr);
5433 
5434 private:
5435  union Item
5436  {
5437  uint32_t NextFreeIndex;
5438  alignas(T) char Value[sizeof(T)];
5439  };
5440 
5441  struct ItemBlock
5442  {
5443  Item* pItems;
5444  uint32_t Capacity;
5445  uint32_t FirstFreeIndex;
5446  };
5447 
5448  const VkAllocationCallbacks* m_pAllocationCallbacks;
5449  const uint32_t m_FirstBlockCapacity;
5450  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5451 
5452  ItemBlock& CreateNewBlock();
5453 };
5454 
5455 template<typename T>
5456 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5457  m_pAllocationCallbacks(pAllocationCallbacks),
5458  m_FirstBlockCapacity(firstBlockCapacity),
5459  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5460 {
5461  VMA_ASSERT(m_FirstBlockCapacity > 1);
5462 }
5463 
5464 template<typename T>
5465 VmaPoolAllocator<T>::~VmaPoolAllocator()
5466 {
5467  for(size_t i = m_ItemBlocks.size(); i--; )
5468  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5469  m_ItemBlocks.clear();
5470 }
5471 
5472 template<typename T>
5473 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
5474 {
5475  for(size_t i = m_ItemBlocks.size(); i--; )
5476  {
5477  ItemBlock& block = m_ItemBlocks[i];
5478  // This block has some free items: use the first one.
5479  if(block.FirstFreeIndex != UINT32_MAX)
5480  {
5481  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5482  block.FirstFreeIndex = pItem->NextFreeIndex;
5483  T* result = (T*)&pItem->Value;
5484  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5485  return result;
5486  }
5487  }
5488 
5489  // No block has a free item: create a new one and use it.
5490  ItemBlock& newBlock = CreateNewBlock();
5491  Item* const pItem = &newBlock.pItems[0];
5492  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5493  T* result = (T*)&pItem->Value;
5494  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5495  return result;
5496 }
5497 
5498 template<typename T>
5499 void VmaPoolAllocator<T>::Free(T* ptr)
5500 {
5501  // Search all memory blocks to find ptr.
5502  for(size_t i = m_ItemBlocks.size(); i--; )
5503  {
5504  ItemBlock& block = m_ItemBlocks[i];
5505 
5506  // Casting to union.
5507  Item* pItemPtr;
5508  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5509 
5510  // Check if pItemPtr is in address range of this block.
5511  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5512  {
5513  ptr->~T(); // Explicit destructor call.
5514  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5515  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5516  block.FirstFreeIndex = index;
5517  return;
5518  }
5519  }
5520  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5521 }
5522 
5523 template<typename T>
5524 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5525 {
5526  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5527  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5528 
5529  const ItemBlock newBlock = {
5530  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5531  newBlockCapacity,
5532  0 };
5533 
5534  m_ItemBlocks.push_back(newBlock);
5535 
5536  // Set up the singly-linked list of all free items in this block.
5537  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5538  newBlock.pItems[i].NextFreeIndex = i + 1;
5539  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5540  return m_ItemBlocks.back();
5541 }
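/*
Free-list sketch (illustrative): within each block, the NextFreeIndex fields
of free Items form a singly-linked list terminated by UINT32_MAX, and block
capacities grow geometrically (previous capacity * 3 / 2). With `callbacks`
assumed as above:

    VmaPoolAllocator<int> pool(callbacks, 32); // block sizes: 32, 48, 72, ...
    int* p = pool.Alloc(7); // pops a free slot and placement-news int(7) in it
    pool.Free(p);           // finds the owning block by address range
                            // (O(block count)) and pushes the slot back
*/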
5542 
5544 // class VmaRawList, VmaList
5545 
5546 #if VMA_USE_STL_LIST
5547 
5548 #define VmaList std::list
5549 
5550 #else // #if VMA_USE_STL_LIST
5551 
5552 template<typename T>
5553 struct VmaListItem
5554 {
5555  VmaListItem* pPrev;
5556  VmaListItem* pNext;
5557  T Value;
5558 };
5559 
5560 // Doubly linked list.
5561 template<typename T>
5562 class VmaRawList
5563 {
5564  VMA_CLASS_NO_COPY(VmaRawList)
5565 public:
5566  typedef VmaListItem<T> ItemType;
5567 
5568  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5569  ~VmaRawList();
5570  void Clear();
5571 
5572  size_t GetCount() const { return m_Count; }
5573  bool IsEmpty() const { return m_Count == 0; }
5574 
5575  ItemType* Front() { return m_pFront; }
5576  const ItemType* Front() const { return m_pFront; }
5577  ItemType* Back() { return m_pBack; }
5578  const ItemType* Back() const { return m_pBack; }
5579 
5580  ItemType* PushBack();
5581  ItemType* PushFront();
5582  ItemType* PushBack(const T& value);
5583  ItemType* PushFront(const T& value);
5584  void PopBack();
5585  void PopFront();
5586 
5587  // Item can be null - it means PushBack.
5588  ItemType* InsertBefore(ItemType* pItem);
5589  // Item can be null - it means PushFront.
5590  ItemType* InsertAfter(ItemType* pItem);
5591 
5592  ItemType* InsertBefore(ItemType* pItem, const T& value);
5593  ItemType* InsertAfter(ItemType* pItem, const T& value);
5594 
5595  void Remove(ItemType* pItem);
5596 
5597 private:
5598  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5599  VmaPoolAllocator<ItemType> m_ItemAllocator;
5600  ItemType* m_pFront;
5601  ItemType* m_pBack;
5602  size_t m_Count;
5603 };
5604 
5605 template<typename T>
5606 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5607  m_pAllocationCallbacks(pAllocationCallbacks),
5608  m_ItemAllocator(pAllocationCallbacks, 128),
5609  m_pFront(VMA_NULL),
5610  m_pBack(VMA_NULL),
5611  m_Count(0)
5612 {
5613 }
5614 
5615 template<typename T>
5616 VmaRawList<T>::~VmaRawList()
5617 {
5618  // Intentionally not calling Clear, because that would perform unnecessary
5619  // computations to return all items to m_ItemAllocator as free.
5620 }
5621 
5622 template<typename T>
5623 void VmaRawList<T>::Clear()
5624 {
5625  if(IsEmpty() == false)
5626  {
5627  ItemType* pItem = m_pBack;
5628  while(pItem != VMA_NULL)
5629  {
5630  ItemType* const pPrevItem = pItem->pPrev;
5631  m_ItemAllocator.Free(pItem);
5632  pItem = pPrevItem;
5633  }
5634  m_pFront = VMA_NULL;
5635  m_pBack = VMA_NULL;
5636  m_Count = 0;
5637  }
5638 }
5639 
5640 template<typename T>
5641 VmaListItem<T>* VmaRawList<T>::PushBack()
5642 {
5643  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5644  pNewItem->pNext = VMA_NULL;
5645  if(IsEmpty())
5646  {
5647  pNewItem->pPrev = VMA_NULL;
5648  m_pFront = pNewItem;
5649  m_pBack = pNewItem;
5650  m_Count = 1;
5651  }
5652  else
5653  {
5654  pNewItem->pPrev = m_pBack;
5655  m_pBack->pNext = pNewItem;
5656  m_pBack = pNewItem;
5657  ++m_Count;
5658  }
5659  return pNewItem;
5660 }
5661 
5662 template<typename T>
5663 VmaListItem<T>* VmaRawList<T>::PushFront()
5664 {
5665  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5666  pNewItem->pPrev = VMA_NULL;
5667  if(IsEmpty())
5668  {
5669  pNewItem->pNext = VMA_NULL;
5670  m_pFront = pNewItem;
5671  m_pBack = pNewItem;
5672  m_Count = 1;
5673  }
5674  else
5675  {
5676  pNewItem->pNext = m_pFront;
5677  m_pFront->pPrev = pNewItem;
5678  m_pFront = pNewItem;
5679  ++m_Count;
5680  }
5681  return pNewItem;
5682 }
5683 
5684 template<typename T>
5685 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5686 {
5687  ItemType* const pNewItem = PushBack();
5688  pNewItem->Value = value;
5689  return pNewItem;
5690 }
5691 
5692 template<typename T>
5693 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5694 {
5695  ItemType* const pNewItem = PushFront();
5696  pNewItem->Value = value;
5697  return pNewItem;
5698 }
5699 
5700 template<typename T>
5701 void VmaRawList<T>::PopBack()
5702 {
5703  VMA_HEAVY_ASSERT(m_Count > 0);
5704  ItemType* const pBackItem = m_pBack;
5705  ItemType* const pPrevItem = pBackItem->pPrev;
5706  if(pPrevItem != VMA_NULL)
5707  {
5708  pPrevItem->pNext = VMA_NULL;
5709  }
5710  m_pBack = pPrevItem;
5711  m_ItemAllocator.Free(pBackItem);
5712  --m_Count;
5713 }
5714 
5715 template<typename T>
5716 void VmaRawList<T>::PopFront()
5717 {
5718  VMA_HEAVY_ASSERT(m_Count > 0);
5719  ItemType* const pFrontItem = m_pFront;
5720  ItemType* const pNextItem = pFrontItem->pNext;
5721  if(pNextItem != VMA_NULL)
5722  {
5723  pNextItem->pPrev = VMA_NULL;
5724  }
5725  m_pFront = pNextItem;
5726  m_ItemAllocator.Free(pFrontItem);
5727  --m_Count;
5728 }
5729 
5730 template<typename T>
5731 void VmaRawList<T>::Remove(ItemType* pItem)
5732 {
5733  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5734  VMA_HEAVY_ASSERT(m_Count > 0);
5735 
5736  if(pItem->pPrev != VMA_NULL)
5737  {
5738  pItem->pPrev->pNext = pItem->pNext;
5739  }
5740  else
5741  {
5742  VMA_HEAVY_ASSERT(m_pFront == pItem);
5743  m_pFront = pItem->pNext;
5744  }
5745 
5746  if(pItem->pNext != VMA_NULL)
5747  {
5748  pItem->pNext->pPrev = pItem->pPrev;
5749  }
5750  else
5751  {
5752  VMA_HEAVY_ASSERT(m_pBack == pItem);
5753  m_pBack = pItem->pPrev;
5754  }
5755 
5756  m_ItemAllocator.Free(pItem);
5757  --m_Count;
5758 }
5759 
5760 template<typename T>
5761 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5762 {
5763  if(pItem != VMA_NULL)
5764  {
5765  ItemType* const prevItem = pItem->pPrev;
5766  ItemType* const newItem = m_ItemAllocator.Alloc();
5767  newItem->pPrev = prevItem;
5768  newItem->pNext = pItem;
5769  pItem->pPrev = newItem;
5770  if(prevItem != VMA_NULL)
5771  {
5772  prevItem->pNext = newItem;
5773  }
5774  else
5775  {
5776  VMA_HEAVY_ASSERT(m_pFront == pItem);
5777  m_pFront = newItem;
5778  }
5779  ++m_Count;
5780  return newItem;
5781  }
5782  else
5783  return PushBack();
5784 }
5785 
5786 template<typename T>
5787 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5788 {
5789  if(pItem != VMA_NULL)
5790  {
5791  ItemType* const nextItem = pItem->pNext;
5792  ItemType* const newItem = m_ItemAllocator.Alloc();
5793  newItem->pNext = nextItem;
5794  newItem->pPrev = pItem;
5795  pItem->pNext = newItem;
5796  if(nextItem != VMA_NULL)
5797  {
5798  nextItem->pPrev = newItem;
5799  }
5800  else
5801  {
5802  VMA_HEAVY_ASSERT(m_pBack == pItem);
5803  m_pBack = newItem;
5804  }
5805  ++m_Count;
5806  return newItem;
5807  }
5808  else
5809  return PushFront();
5810 }
5811 
5812 template<typename T>
5813 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5814 {
5815  ItemType* const newItem = InsertBefore(pItem);
5816  newItem->Value = value;
5817  return newItem;
5818 }
5819 
5820 template<typename T>
5821 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5822 {
5823  ItemType* const newItem = InsertAfter(pItem);
5824  newItem->Value = value;
5825  return newItem;
5826 }
5827 
5828 template<typename T, typename AllocatorT>
5829 class VmaList
5830 {
5831  VMA_CLASS_NO_COPY(VmaList)
5832 public:
5833  class iterator
5834  {
5835  public:
5836  iterator() :
5837  m_pList(VMA_NULL),
5838  m_pItem(VMA_NULL)
5839  {
5840  }
5841 
5842  T& operator*() const
5843  {
5844  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5845  return m_pItem->Value;
5846  }
5847  T* operator->() const
5848  {
5849  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5850  return &m_pItem->Value;
5851  }
5852 
5853  iterator& operator++()
5854  {
5855  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5856  m_pItem = m_pItem->pNext;
5857  return *this;
5858  }
5859  iterator& operator--()
5860  {
5861  if(m_pItem != VMA_NULL)
5862  {
5863  m_pItem = m_pItem->pPrev;
5864  }
5865  else
5866  {
5867  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5868  m_pItem = m_pList->Back();
5869  }
5870  return *this;
5871  }
5872 
5873  iterator operator++(int)
5874  {
5875  iterator result = *this;
5876  ++*this;
5877  return result;
5878  }
5879  iterator operator--(int)
5880  {
5881  iterator result = *this;
5882  --*this;
5883  return result;
5884  }
5885 
5886  bool operator==(const iterator& rhs) const
5887  {
5888  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5889  return m_pItem == rhs.m_pItem;
5890  }
5891  bool operator!=(const iterator& rhs) const
5892  {
5893  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5894  return m_pItem != rhs.m_pItem;
5895  }
5896 
5897  private:
5898  VmaRawList<T>* m_pList;
5899  VmaListItem<T>* m_pItem;
5900 
5901  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5902  m_pList(pList),
5903  m_pItem(pItem)
5904  {
5905  }
5906 
5907  friend class VmaList<T, AllocatorT>;
5908  };
5909 
5910  class const_iterator
5911  {
5912  public:
5913  const_iterator() :
5914  m_pList(VMA_NULL),
5915  m_pItem(VMA_NULL)
5916  {
5917  }
5918 
5919  const_iterator(const iterator& src) :
5920  m_pList(src.m_pList),
5921  m_pItem(src.m_pItem)
5922  {
5923  }
5924 
5925  const T& operator*() const
5926  {
5927  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5928  return m_pItem->Value;
5929  }
5930  const T* operator->() const
5931  {
5932  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5933  return &m_pItem->Value;
5934  }
5935 
5936  const_iterator& operator++()
5937  {
5938  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5939  m_pItem = m_pItem->pNext;
5940  return *this;
5941  }
5942  const_iterator& operator--()
5943  {
5944  if(m_pItem != VMA_NULL)
5945  {
5946  m_pItem = m_pItem->pPrev;
5947  }
5948  else
5949  {
5950  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5951  m_pItem = m_pList->Back();
5952  }
5953  return *this;
5954  }
5955 
5956  const_iterator operator++(int)
5957  {
5958  const_iterator result = *this;
5959  ++*this;
5960  return result;
5961  }
5962  const_iterator operator--(int)
5963  {
5964  const_iterator result = *this;
5965  --*this;
5966  return result;
5967  }
5968 
5969  bool operator==(const const_iterator& rhs) const
5970  {
5971  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5972  return m_pItem == rhs.m_pItem;
5973  }
5974  bool operator!=(const const_iterator& rhs) const
5975  {
5976  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5977  return m_pItem != rhs.m_pItem;
5978  }
5979 
5980  private:
5981  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5982  m_pList(pList),
5983  m_pItem(pItem)
5984  {
5985  }
5986 
5987  const VmaRawList<T>* m_pList;
5988  const VmaListItem<T>* m_pItem;
5989 
5990  friend class VmaList<T, AllocatorT>;
5991  };
5992 
5993  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5994 
5995  bool empty() const { return m_RawList.IsEmpty(); }
5996  size_t size() const { return m_RawList.GetCount(); }
5997 
5998  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5999  iterator end() { return iterator(&m_RawList, VMA_NULL); }
6000 
6001  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
6002  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
6003 
6004  void clear() { m_RawList.Clear(); }
6005  void push_back(const T& value) { m_RawList.PushBack(value); }
6006  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
6007  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
6008 
6009 private:
6010  VmaRawList<T> m_RawList;
6011 };
6012 
6013 #endif // #if VMA_USE_STL_LIST
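/*
Iteration sketch (illustrative): VmaList wraps VmaRawList with STL-style
iterators in which end() holds a null item pointer, so operator-- applied to
end() steps to Back(). With `callbacks` assumed as above:

    VmaStlAllocator<int> a(callbacks);
    VmaList<int, VmaStlAllocator<int> > list(a);
    list.push_back(1);
    list.push_back(2);
    for(VmaList<int, VmaStlAllocator<int> >::iterator it = list.begin();
        it != list.end(); ++it)
    {
        // *it visits 1, then 2
    }
*/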
6014 
6016 // class VmaMap
6017 
6018 // Unused in this version.
6019 #if 0
6020 
6021 #if VMA_USE_STL_UNORDERED_MAP
6022 
6023 #define VmaPair std::pair
6024 
6025 #define VMA_MAP_TYPE(KeyT, ValueT) \
6026  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
6027 
6028 #else // #if VMA_USE_STL_UNORDERED_MAP
6029 
6030 template<typename T1, typename T2>
6031 struct VmaPair
6032 {
6033  T1 first;
6034  T2 second;
6035 
6036  VmaPair() : first(), second() { }
6037  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
6038 };
6039 
6040 /* Class compatible with subset of interface of std::unordered_map.
6041 KeyT, ValueT must be POD because they will be stored in VmaVector.
6042 */
6043 template<typename KeyT, typename ValueT>
6044 class VmaMap
6045 {
6046 public:
6047  typedef VmaPair<KeyT, ValueT> PairType;
6048  typedef PairType* iterator;
6049 
6050  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
6051 
6052  iterator begin() { return m_Vector.begin(); }
6053  iterator end() { return m_Vector.end(); }
6054 
6055  void insert(const PairType& pair);
6056  iterator find(const KeyT& key);
6057  void erase(iterator it);
6058 
6059 private:
6060  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
6061 };
6062 
6063 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
6064 
6065 template<typename FirstT, typename SecondT>
6066 struct VmaPairFirstLess
6067 {
6068  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
6069  {
6070  return lhs.first < rhs.first;
6071  }
6072  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
6073  {
6074  return lhs.first < rhsFirst;
6075  }
6076 };
6077 
6078 template<typename KeyT, typename ValueT>
6079 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
6080 {
6081  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
6082  m_Vector.data(),
6083  m_Vector.data() + m_Vector.size(),
6084  pair,
6085  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
6086  VmaVectorInsert(m_Vector, indexToInsert, pair);
6087 }
6088 
6089 template<typename KeyT, typename ValueT>
6090 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
6091 {
6092  PairType* it = VmaBinaryFindFirstNotLess(
6093  m_Vector.data(),
6094  m_Vector.data() + m_Vector.size(),
6095  key,
6096  VmaPairFirstLess<KeyT, ValueT>());
6097  if((it != m_Vector.end()) && (it->first == key))
6098  {
6099  return it;
6100  }
6101  else
6102  {
6103  return m_Vector.end();
6104  }
6105 }
6106 
6107 template<typename KeyT, typename ValueT>
6108 void VmaMap<KeyT, ValueT>::erase(iterator it)
6109 {
6110  VmaVectorRemove(m_Vector, it - m_Vector.begin());
6111 }
6112 
6113 #endif // #if VMA_USE_STL_UNORDERED_MAP
6114 
6115 #endif // #if 0
6116 
6118 
6119 class VmaDeviceMemoryBlock;
6120 
6121 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
6122 
6123 struct VmaAllocation_T
6124 {
6125 private:
6126  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
6127 
6128  enum FLAGS
6129  {
6130  FLAG_USER_DATA_STRING = 0x01,
6131  };
6132 
6133 public:
6134  enum ALLOCATION_TYPE
6135  {
6136  ALLOCATION_TYPE_NONE,
6137  ALLOCATION_TYPE_BLOCK,
6138  ALLOCATION_TYPE_DEDICATED,
6139  };
6140 
6141  /*
6142  This struct is allocated using VmaPoolAllocator.
6143  */
6144 
6145  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
6146  m_Alignment{1},
6147  m_Size{0},
6148  m_pUserData{VMA_NULL},
6149  m_LastUseFrameIndex{currentFrameIndex},
6150  m_MemoryTypeIndex{0},
6151  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
6152  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
6153  m_MapCount{0},
6154  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
6155  {
6156 #if VMA_STATS_STRING_ENABLED
6157  m_CreationFrameIndex = currentFrameIndex;
6158  m_BufferImageUsage = 0;
6159 #endif
6160  }
6161 
6162  ~VmaAllocation_T()
6163  {
6164  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
6165 
6166  // Check if owned string was freed.
6167  VMA_ASSERT(m_pUserData == VMA_NULL);
6168  }
6169 
6170  void InitBlockAllocation(
6171  VmaDeviceMemoryBlock* block,
6172  VkDeviceSize offset,
6173  VkDeviceSize alignment,
6174  VkDeviceSize size,
6175  uint32_t memoryTypeIndex,
6176  VmaSuballocationType suballocationType,
6177  bool mapped,
6178  bool canBecomeLost)
6179  {
6180  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6181  VMA_ASSERT(block != VMA_NULL);
6182  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6183  m_Alignment = alignment;
6184  m_Size = size;
6185  m_MemoryTypeIndex = memoryTypeIndex;
6186  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6187  m_SuballocationType = (uint8_t)suballocationType;
6188  m_BlockAllocation.m_Block = block;
6189  m_BlockAllocation.m_Offset = offset;
6190  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
6191  }
6192 
6193  void InitLost()
6194  {
6195  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6196  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
6197  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6198  m_MemoryTypeIndex = 0;
6199  m_BlockAllocation.m_Block = VMA_NULL;
6200  m_BlockAllocation.m_Offset = 0;
6201  m_BlockAllocation.m_CanBecomeLost = true;
6202  }
6203 
6204  void ChangeBlockAllocation(
6205  VmaAllocator hAllocator,
6206  VmaDeviceMemoryBlock* block,
6207  VkDeviceSize offset);
6208 
6209  void ChangeOffset(VkDeviceSize newOffset);
6210 
6211  // pMappedData not null means allocation is created with MAPPED flag.
6212  void InitDedicatedAllocation(
6213  uint32_t memoryTypeIndex,
6214  VkDeviceMemory hMemory,
6215  VmaSuballocationType suballocationType,
6216  void* pMappedData,
6217  VkDeviceSize size)
6218  {
6219  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6220  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
6221  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
6222  m_Alignment = 0;
6223  m_Size = size;
6224  m_MemoryTypeIndex = memoryTypeIndex;
6225  m_SuballocationType = (uint8_t)suballocationType;
6226  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6227  m_DedicatedAllocation.m_hMemory = hMemory;
6228  m_DedicatedAllocation.m_pMappedData = pMappedData;
6229  }
6230 
6231  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
6232  VkDeviceSize GetAlignment() const { return m_Alignment; }
6233  VkDeviceSize GetSize() const { return m_Size; }
6234  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
6235  void* GetUserData() const { return m_pUserData; }
6236  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6237  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6238 
6239  VmaDeviceMemoryBlock* GetBlock() const
6240  {
6241  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6242  return m_BlockAllocation.m_Block;
6243  }
6244  VkDeviceSize GetOffset() const;
6245  VkDeviceMemory GetMemory() const;
6246  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6247  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6248  void* GetMappedData() const;
6249  bool CanBecomeLost() const;
6250 
6251  uint32_t GetLastUseFrameIndex() const
6252  {
6253  return m_LastUseFrameIndex.load();
6254  }
6255  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6256  {
6257  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6258  }
6259  /*
6260  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6261  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6262  - Else, returns false.
6263 
6264  If hAllocation is already lost, assert - you should not call it then.
6265  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
6266  */
6267  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6268 
6269  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6270  {
6271  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6272  outInfo.blockCount = 1;
6273  outInfo.allocationCount = 1;
6274  outInfo.unusedRangeCount = 0;
6275  outInfo.usedBytes = m_Size;
6276  outInfo.unusedBytes = 0;
6277  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6278  outInfo.unusedRangeSizeMin = UINT64_MAX;
6279  outInfo.unusedRangeSizeMax = 0;
6280  }
6281 
6282  void BlockAllocMap();
6283  void BlockAllocUnmap();
6284  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6285  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6286 
6287 #if VMA_STATS_STRING_ENABLED
6288  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6289  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6290 
6291  void InitBufferImageUsage(uint32_t bufferImageUsage)
6292  {
6293  VMA_ASSERT(m_BufferImageUsage == 0);
6294  m_BufferImageUsage = bufferImageUsage;
6295  }
6296 
6297  void PrintParameters(class VmaJsonWriter& json) const;
6298 #endif
6299 
6300 private:
6301  VkDeviceSize m_Alignment;
6302  VkDeviceSize m_Size;
6303  void* m_pUserData;
6304  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6305  uint32_t m_MemoryTypeIndex;
6306  uint8_t m_Type; // ALLOCATION_TYPE
6307  uint8_t m_SuballocationType; // VmaSuballocationType
6308  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6309  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
6310  uint8_t m_MapCount;
6311  uint8_t m_Flags; // enum FLAGS
6312 
6313  // Allocation out of VmaDeviceMemoryBlock.
6314  struct BlockAllocation
6315  {
6316  VmaDeviceMemoryBlock* m_Block;
6317  VkDeviceSize m_Offset;
6318  bool m_CanBecomeLost;
6319  };
6320 
6321  // Allocation for an object that has its own private VkDeviceMemory.
6322  struct DedicatedAllocation
6323  {
6324  VkDeviceMemory m_hMemory;
6325  void* m_pMappedData; // Not null means memory is mapped.
6326  };
6327 
6328  union
6329  {
6330  // Allocation out of VmaDeviceMemoryBlock.
6331  BlockAllocation m_BlockAllocation;
6332  // Allocation for an object that has its own private VkDeviceMemory.
6333  DedicatedAllocation m_DedicatedAllocation;
6334  };
6335 
6336 #if VMA_STATS_STRING_ENABLED
6337  uint32_t m_CreationFrameIndex;
6338  uint32_t m_BufferImageUsage; // 0 if unknown.
6339 #endif
6340 
6341  void FreeUserDataString(VmaAllocator hAllocator);
6342 };
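/*
Map-count packing sketch: m_MapCount keeps the persistent-map flag in bit 0x80
and the vmaMapMemory() reference count in the low 7 bits. For example, an
allocation created with VMA_ALLOCATION_CREATE_MAPPED_BIT and then explicitly
mapped twice holds m_MapCount == (0x80 | 2) == 0x82. The destructor's check
(m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 therefore asserts that every
explicit map was paired with an unmap, regardless of the persistent flag.
*/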
6343 
6344 /*
6345 Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
6346 an allocated memory block, or is free.
6347 */
6348 struct VmaSuballocation
6349 {
6350  VkDeviceSize offset;
6351  VkDeviceSize size;
6352  VmaAllocation hAllocation;
6353  VmaSuballocationType type;
6354 };
6355 
6356 // Comparator for offsets.
6357 struct VmaSuballocationOffsetLess
6358 {
6359  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6360  {
6361  return lhs.offset < rhs.offset;
6362  }
6363 };
6364 struct VmaSuballocationOffsetGreater
6365 {
6366  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6367  {
6368  return lhs.offset > rhs.offset;
6369  }
6370 };
6371 
6372 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6373 
6374 // Cost of making one additional allocation lost, expressed as an equivalent in bytes.
6375 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6376 
6377 enum class VmaAllocationRequestType
6378 {
6379  Normal,
6380  // Used by "Linear" algorithm.
6381  UpperAddress,
6382  EndOf1st,
6383  EndOf2nd,
6384 };
6385 
6386 /*
6387 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6388 
6389 If canMakeOtherLost was false:
6390 - item points to a FREE suballocation.
6391 - itemsToMakeLostCount is 0.
6392 
6393 If canMakeOtherLost was true:
6394 - item points to first of sequence of suballocations, which are either FREE,
6395  or point to VmaAllocations that can become lost.
6396 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6397  the requested allocation to succeed.
6398 */
6399 struct VmaAllocationRequest
6400 {
6401  VkDeviceSize offset;
6402  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6403  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6404  VmaSuballocationList::iterator item;
6405  size_t itemsToMakeLostCount;
6406  void* customData;
6407  VmaAllocationRequestType type;
6408 
6409  VkDeviceSize CalcCost() const
6410  {
6411  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6412  }
6413 };
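/*
Cost arithmetic sketch: CalcCost() converts each allocation that would be made
lost into VMA_LOST_ALLOCATION_COST (1 MiB) of equivalent bytes. A request that
overlaps 65536 bytes of existing items and would make 2 allocations lost costs
65536 + 2 * 1048576 = 2162688, so cheaper candidate requests can be preferred
when several placements are possible.
*/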
6414 
6415 /*
6416 Data structure used for bookkeeping of allocations and unused ranges of memory
6417 in a single VkDeviceMemory block.
6418 */
6419 class VmaBlockMetadata
6420 {
6421 public:
6422  VmaBlockMetadata(VmaAllocator hAllocator);
6423  virtual ~VmaBlockMetadata() { }
6424  virtual void Init(VkDeviceSize size) { m_Size = size; }
6425 
6426  // Validates all data structures inside this object. If not valid, returns false.
6427  virtual bool Validate() const = 0;
6428  VkDeviceSize GetSize() const { return m_Size; }
6429  virtual size_t GetAllocationCount() const = 0;
6430  virtual VkDeviceSize GetSumFreeSize() const = 0;
6431  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6432  // Returns true if this block is empty - contains only a single free suballocation.
6433  virtual bool IsEmpty() const = 0;
6434 
6435  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6436  // Shouldn't modify blockCount.
6437  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6438 
6439 #if VMA_STATS_STRING_ENABLED
6440  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6441 #endif
6442 
6443  // Tries to find a place for suballocation with given parameters inside this block.
6444  // If succeeded, fills pAllocationRequest and returns true.
6445  // If failed, returns false.
6446  virtual bool CreateAllocationRequest(
6447  uint32_t currentFrameIndex,
6448  uint32_t frameInUseCount,
6449  VkDeviceSize bufferImageGranularity,
6450  VkDeviceSize allocSize,
6451  VkDeviceSize allocAlignment,
6452  bool upperAddress,
6453  VmaSuballocationType allocType,
6454  bool canMakeOtherLost,
6455  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6456  uint32_t strategy,
6457  VmaAllocationRequest* pAllocationRequest) = 0;
6458 
6459  virtual bool MakeRequestedAllocationsLost(
6460  uint32_t currentFrameIndex,
6461  uint32_t frameInUseCount,
6462  VmaAllocationRequest* pAllocationRequest) = 0;
6463 
6464  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6465 
6466  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6467 
6468  // Makes actual allocation based on request. Request must already be checked and valid.
6469  virtual void Alloc(
6470  const VmaAllocationRequest& request,
6471  VmaSuballocationType type,
6472  VkDeviceSize allocSize,
6473  VmaAllocation hAllocation) = 0;
6474 
6475  // Frees suballocation assigned to given memory region.
6476  virtual void Free(const VmaAllocation allocation) = 0;
6477  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6478 
6479 protected:
6480  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6481 
6482 #if VMA_STATS_STRING_ENABLED
6483  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6484  VkDeviceSize unusedBytes,
6485  size_t allocationCount,
6486  size_t unusedRangeCount) const;
6487  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6488  VkDeviceSize offset,
6489  VmaAllocation hAllocation) const;
6490  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6491  VkDeviceSize offset,
6492  VkDeviceSize size) const;
6493  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6494 #endif
6495 
6496 private:
6497  VkDeviceSize m_Size;
6498  const VkAllocationCallbacks* m_pAllocationCallbacks;
6499 };
6500 
6501 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6502  VMA_ASSERT(0 && "Validation failed: " #cond); \
6503  return false; \
6504  } } while(false)
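/*
Usage sketch for VMA_VALIDATE inside a Validate() override (member names here
are hypothetical, for illustration only): the macro asserts and returns false
on the first failed condition, so invariants read linearly:

    bool Validate() const
    {
        VMA_VALIDATE(m_Size > 0);              // hypothetical invariant
        VMA_VALIDATE(m_SumFreeSize <= m_Size); // hypothetical invariant
        return true;
    }
*/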
6505 
6506 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6507 {
6508  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6509 public:
6510  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6511  virtual ~VmaBlockMetadata_Generic();
6512  virtual void Init(VkDeviceSize size);
6513 
6514  virtual bool Validate() const;
6515  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6516  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6517  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6518  virtual bool IsEmpty() const;
6519 
6520  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6521  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6522 
6523 #if VMA_STATS_STRING_ENABLED
6524  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6525 #endif
6526 
6527  virtual bool CreateAllocationRequest(
6528  uint32_t currentFrameIndex,
6529  uint32_t frameInUseCount,
6530  VkDeviceSize bufferImageGranularity,
6531  VkDeviceSize allocSize,
6532  VkDeviceSize allocAlignment,
6533  bool upperAddress,
6534  VmaSuballocationType allocType,
6535  bool canMakeOtherLost,
6536  uint32_t strategy,
6537  VmaAllocationRequest* pAllocationRequest);
6538 
6539  virtual bool MakeRequestedAllocationsLost(
6540  uint32_t currentFrameIndex,
6541  uint32_t frameInUseCount,
6542  VmaAllocationRequest* pAllocationRequest);
6543 
6544  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6545 
6546  virtual VkResult CheckCorruption(const void* pBlockData);
6547 
6548  virtual void Alloc(
6549  const VmaAllocationRequest& request,
6550  VmaSuballocationType type,
6551  VkDeviceSize allocSize,
6552  VmaAllocation hAllocation);
6553 
6554  virtual void Free(const VmaAllocation allocation);
6555  virtual void FreeAtOffset(VkDeviceSize offset);
6556 
6558  // For defragmentation
6559 
6560  bool IsBufferImageGranularityConflictPossible(
6561  VkDeviceSize bufferImageGranularity,
6562  VmaSuballocationType& inOutPrevSuballocType) const;
6563 
6564 private:
6565  friend class VmaDefragmentationAlgorithm_Generic;
6566  friend class VmaDefragmentationAlgorithm_Fast;
6567 
6568  uint32_t m_FreeCount;
6569  VkDeviceSize m_SumFreeSize;
6570  VmaSuballocationList m_Suballocations;
6571  // Suballocations that are free and have size greater than a certain threshold.
6572  // Sorted by size, ascending.
6573  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6574 
6575  bool ValidateFreeSuballocationList() const;
6576 
6577  // Checks if a requested suballocation with the given parameters can be placed in the given suballocItem.
6578  // If yes, fills pOffset and returns true. If no, returns false.
6579  bool CheckAllocation(
6580  uint32_t currentFrameIndex,
6581  uint32_t frameInUseCount,
6582  VkDeviceSize bufferImageGranularity,
6583  VkDeviceSize allocSize,
6584  VkDeviceSize allocAlignment,
6585  VmaSuballocationType allocType,
6586  VmaSuballocationList::const_iterator suballocItem,
6587  bool canMakeOtherLost,
6588  VkDeviceSize* pOffset,
6589  size_t* itemsToMakeLostCount,
6590  VkDeviceSize* pSumFreeSize,
6591  VkDeviceSize* pSumItemSize) const;
6592  // Given a free suballocation, merges it with the following one, which must also be free.
6593  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6594  // Releases given suballocation, making it free.
6595  // Merges it with adjacent free suballocations if applicable.
6596  // Returns iterator to new free suballocation at this place.
6597  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6598  // Given a free suballocation, inserts it into the sorted list
6599  // m_FreeSuballocationsBySize if it is suitable.
6600  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6601  // Given a free suballocation, removes it from the sorted list
6602  // m_FreeSuballocationsBySize if it is suitable.
6603  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6604 };
6605 
6606 /*
6607 Allocations and their references in the internal data structure look like this:
6608 
6609 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6610 
6611  0 +-------+
6612  | |
6613  | |
6614  | |
6615  +-------+
6616  | Alloc | 1st[m_1stNullItemsBeginCount]
6617  +-------+
6618  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6619  +-------+
6620  | ... |
6621  +-------+
6622  | Alloc | 1st[1st.size() - 1]
6623  +-------+
6624  | |
6625  | |
6626  | |
6627 GetSize() +-------+
6628 
6629 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6630 
6631  0 +-------+
6632  | Alloc | 2nd[0]
6633  +-------+
6634  | Alloc | 2nd[1]
6635  +-------+
6636  | ... |
6637  +-------+
6638  | Alloc | 2nd[2nd.size() - 1]
6639  +-------+
6640  | |
6641  | |
6642  | |
6643  +-------+
6644  | Alloc | 1st[m_1stNullItemsBeginCount]
6645  +-------+
6646  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6647  +-------+
6648  | ... |
6649  +-------+
6650  | Alloc | 1st[1st.size() - 1]
6651  +-------+
6652  | |
6653 GetSize() +-------+
6654 
6655 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6656 
6657  0 +-------+
6658  | |
6659  | |
6660  | |
6661  +-------+
6662  | Alloc | 1st[m_1stNullItemsBeginCount]
6663  +-------+
6664  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6665  +-------+
6666  | ... |
6667  +-------+
6668  | Alloc | 1st[1st.size() - 1]
6669  +-------+
6670  | |
6671  | |
6672  | |
6673  +-------+
6674  | Alloc | 2nd[2nd.size() - 1]
6675  +-------+
6676  | ... |
6677  +-------+
6678  | Alloc | 2nd[1]
6679  +-------+
6680  | Alloc | 2nd[0]
6681 GetSize() +-------+
6682 
6683 */
6684 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6685 {
6686  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6687 public:
6688  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6689  virtual ~VmaBlockMetadata_Linear();
6690  virtual void Init(VkDeviceSize size);
6691 
6692  virtual bool Validate() const;
6693  virtual size_t GetAllocationCount() const;
6694  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6695  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6696  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6697 
6698  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6699  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6700 
6701 #if VMA_STATS_STRING_ENABLED
6702  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6703 #endif
6704 
6705  virtual bool CreateAllocationRequest(
6706  uint32_t currentFrameIndex,
6707  uint32_t frameInUseCount,
6708  VkDeviceSize bufferImageGranularity,
6709  VkDeviceSize allocSize,
6710  VkDeviceSize allocAlignment,
6711  bool upperAddress,
6712  VmaSuballocationType allocType,
6713  bool canMakeOtherLost,
6714  uint32_t strategy,
6715  VmaAllocationRequest* pAllocationRequest);
6716 
6717  virtual bool MakeRequestedAllocationsLost(
6718  uint32_t currentFrameIndex,
6719  uint32_t frameInUseCount,
6720  VmaAllocationRequest* pAllocationRequest);
6721 
6722  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6723 
6724  virtual VkResult CheckCorruption(const void* pBlockData);
6725 
6726  virtual void Alloc(
6727  const VmaAllocationRequest& request,
6728  VmaSuballocationType type,
6729  VkDeviceSize allocSize,
6730  VmaAllocation hAllocation);
6731 
6732  virtual void Free(const VmaAllocation allocation);
6733  virtual void FreeAtOffset(VkDeviceSize offset);
6734 
6735 private:
6736  /*
6737  There are two suballocation vectors, used in ping-pong way.
6738  The one with index m_1stVectorIndex is called 1st.
6739  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6740  2nd can be non-empty only when 1st is not empty.
6741  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6742  */
6743  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6744 
6745  enum SECOND_VECTOR_MODE
6746  {
6747  SECOND_VECTOR_EMPTY,
6748  /*
6749  Suballocations in 2nd vector are created later than the ones in 1st, but they
6750  all have smaller offsets.
6751  */
6752  SECOND_VECTOR_RING_BUFFER,
6753  /*
6754  Suballocations in the 2nd vector form the upper side of a double stack.
6755  They all have offsets higher than those in the 1st vector.
6756  The top of this stack means smaller offsets, but higher indices in this vector.
6757  */
6758  SECOND_VECTOR_DOUBLE_STACK,
6759  };
6760 
6761  VkDeviceSize m_SumFreeSize;
6762  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6763  uint32_t m_1stVectorIndex;
6764  SECOND_VECTOR_MODE m_2ndVectorMode;
6765 
6766  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6767  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6768  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6769  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6770 
6771  // Number of items in 1st vector with hAllocation = null at the beginning.
6772  size_t m_1stNullItemsBeginCount;
6773  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6774  size_t m_1stNullItemsMiddleCount;
6775  // Number of items in 2nd vector with hAllocation = null.
6776  size_t m_2ndNullItemsCount;
6777 
6778  bool ShouldCompact1st() const;
6779  void CleanupAfterFree();
6780 
6781  bool CreateAllocationRequest_LowerAddress(
6782  uint32_t currentFrameIndex,
6783  uint32_t frameInUseCount,
6784  VkDeviceSize bufferImageGranularity,
6785  VkDeviceSize allocSize,
6786  VkDeviceSize allocAlignment,
6787  VmaSuballocationType allocType,
6788  bool canMakeOtherLost,
6789  uint32_t strategy,
6790  VmaAllocationRequest* pAllocationRequest);
6791  bool CreateAllocationRequest_UpperAddress(
6792  uint32_t currentFrameIndex,
6793  uint32_t frameInUseCount,
6794  VkDeviceSize bufferImageGranularity,
6795  VkDeviceSize allocSize,
6796  VkDeviceSize allocAlignment,
6797  VmaSuballocationType allocType,
6798  bool canMakeOtherLost,
6799  uint32_t strategy,
6800  VmaAllocationRequest* pAllocationRequest);
6801 };
6802 
6803 /*
6804 - GetSize() is the original size of the allocated memory block.
6805 - m_UsableSize is this size aligned down to a power of two.
6806  All allocations and calculations happen relative to m_UsableSize.
6807 - GetUnusableSize() is the difference between them.
6808  It is reported as a separate, unused range, not available for allocations.
6809 
6810 Node at level 0 has size = m_UsableSize.
6811 Each next level contains nodes with size 2 times smaller than current level.
6812 m_LevelCount is the maximum number of levels to use in the current object.
6813 */
6814 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6815 {
6816  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6817 public:
6818  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6819  virtual ~VmaBlockMetadata_Buddy();
6820  virtual void Init(VkDeviceSize size);
6821 
6822  virtual bool Validate() const;
6823  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6824  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6825  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6826  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6827 
6828  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6829  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6830 
6831 #if VMA_STATS_STRING_ENABLED
6832  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6833 #endif
6834 
6835  virtual bool CreateAllocationRequest(
6836  uint32_t currentFrameIndex,
6837  uint32_t frameInUseCount,
6838  VkDeviceSize bufferImageGranularity,
6839  VkDeviceSize allocSize,
6840  VkDeviceSize allocAlignment,
6841  bool upperAddress,
6842  VmaSuballocationType allocType,
6843  bool canMakeOtherLost,
6844  uint32_t strategy,
6845  VmaAllocationRequest* pAllocationRequest);
6846 
6847  virtual bool MakeRequestedAllocationsLost(
6848  uint32_t currentFrameIndex,
6849  uint32_t frameInUseCount,
6850  VmaAllocationRequest* pAllocationRequest);
6851 
6852  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6853 
6854  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6855 
6856  virtual void Alloc(
6857  const VmaAllocationRequest& request,
6858  VmaSuballocationType type,
6859  VkDeviceSize allocSize,
6860  VmaAllocation hAllocation);
6861 
6862  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6863  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6864 
6865 private:
6866  static const VkDeviceSize MIN_NODE_SIZE = 32;
6867  static const size_t MAX_LEVELS = 30;
6868 
6869  struct ValidationContext
6870  {
6871  size_t calculatedAllocationCount;
6872  size_t calculatedFreeCount;
6873  VkDeviceSize calculatedSumFreeSize;
6874 
6875  ValidationContext() :
6876  calculatedAllocationCount(0),
6877  calculatedFreeCount(0),
6878  calculatedSumFreeSize(0) { }
6879  };
6880 
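 // Editorial note: each Node is in exactly one of three states, selecting the
 // active union member below: TYPE_FREE (linked into the per-level free list
 // through free.prev/next), TYPE_ALLOCATION (owns a VmaAllocation handle), or
 // TYPE_SPLIT (split.leftChild points to the first child; the second child is
 // reachable as leftChild->buddy).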
6881  struct Node
6882  {
6883  VkDeviceSize offset;
6884  enum TYPE
6885  {
6886  TYPE_FREE,
6887  TYPE_ALLOCATION,
6888  TYPE_SPLIT,
6889  TYPE_COUNT
6890  } type;
6891  Node* parent;
6892  Node* buddy;
6893 
6894  union
6895  {
6896  struct
6897  {
6898  Node* prev;
6899  Node* next;
6900  } free;
6901  struct
6902  {
6903  VmaAllocation alloc;
6904  } allocation;
6905  struct
6906  {
6907  Node* leftChild;
6908  } split;
6909  };
6910  };
6911 
6912  // Size of the memory block aligned down to a power of two.
6913  VkDeviceSize m_UsableSize;
6914  uint32_t m_LevelCount;
6915 
6916  Node* m_Root;
6917  struct {
6918  Node* front;
6919  Node* back;
6920  } m_FreeList[MAX_LEVELS];
6921  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6922  size_t m_AllocationCount;
6923  // Number of nodes in the tree with type == TYPE_FREE.
6924  size_t m_FreeCount;
6925  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6926  VkDeviceSize m_SumFreeSize;
6927 
6928  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6929  void DeleteNode(Node* node);
6930  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6931  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6932  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6933  // Alloc passed just for validation. Can be null.
6934  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6935  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6936  // Adds node to the front of FreeList at given level.
6937  // node->type must be FREE.
6938  // node->free.prev, next can be undefined.
6939  void AddToFreeListFront(uint32_t level, Node* node);
6940  // Removes node from FreeList at given level.
6941  // node->type must be FREE.
6942  // node->free.prev, next stay untouched.
6943  void RemoveFromFreeList(uint32_t level, Node* node);
6944 
6945 #if VMA_STATS_STRING_ENABLED
6946  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6947 #endif
6948 };
6949 
6950 /*
6951 Represents a single block of device memory (`VkDeviceMemory`) with all the
6952 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6953 
6954 Thread-safety: This class must be externally synchronized.
6955 */
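// Editorial note: mapping of a block is reference-counted. Map() is expected to
// call vkMapMemory only on the 0 -> 1 transition of m_MapCount and Unmap() to
// call vkUnmapMemory only on the 1 -> 0 transition, so many allocations within
// the same block can share one persistent VkDeviceMemory mapping.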
6956 class VmaDeviceMemoryBlock
6957 {
6958  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6959 public:
6960  VmaBlockMetadata* m_pMetadata;
6961 
6962  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6963 
6964  ~VmaDeviceMemoryBlock()
6965  {
6966  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6967  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6968  }
6969 
6970  // Always call after construction.
6971  void Init(
6972  VmaAllocator hAllocator,
6973  VmaPool hParentPool,
6974  uint32_t newMemoryTypeIndex,
6975  VkDeviceMemory newMemory,
6976  VkDeviceSize newSize,
6977  uint32_t id,
6978  uint32_t algorithm);
6979  // Always call before destruction.
6980  void Destroy(VmaAllocator allocator);
6981 
6982  VmaPool GetParentPool() const { return m_hParentPool; }
6983  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6984  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6985  uint32_t GetId() const { return m_Id; }
6986  void* GetMappedData() const { return m_pMappedData; }
6987 
6988  // Validates all data structures inside this object. If not valid, returns false.
6989  bool Validate() const;
6990 
6991  VkResult CheckCorruption(VmaAllocator hAllocator);
6992 
6993  // ppData can be null.
6994  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6995  void Unmap(VmaAllocator hAllocator, uint32_t count);
6996 
6997  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6998  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6999 
7000  VkResult BindBufferMemory(
7001  const VmaAllocator hAllocator,
7002  const VmaAllocation hAllocation,
7003  VkDeviceSize allocationLocalOffset,
7004  VkBuffer hBuffer,
7005  const void* pNext);
7006  VkResult BindImageMemory(
7007  const VmaAllocator hAllocator,
7008  const VmaAllocation hAllocation,
7009  VkDeviceSize allocationLocalOffset,
7010  VkImage hImage,
7011  const void* pNext);
7012 
7013 private:
7014  VmaPool m_hParentPool; // VK_NULL_HANDLE if the block does not belong to a custom pool.
7015  uint32_t m_MemoryTypeIndex;
7016  uint32_t m_Id;
7017  VkDeviceMemory m_hMemory;
7018 
7019  /*
7020  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
7021  Also protects m_MapCount, m_pMappedData.
7022  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
7023  */
7024  VMA_MUTEX m_Mutex;
7025  uint32_t m_MapCount;
7026  void* m_pMappedData;
7027 };
7028 
7029 struct VmaPointerLess
7030 {
7031  bool operator()(const void* lhs, const void* rhs) const
7032  {
7033  return lhs < rhs;
7034  }
7035 };
7036 
7037 struct VmaDefragmentationMove
7038 {
7039  size_t srcBlockIndex;
7040  size_t dstBlockIndex;
7041  VkDeviceSize srcOffset;
7042  VkDeviceSize dstOffset;
7043  VkDeviceSize size;
7044  VmaAllocation hAllocation;
7045  VmaDeviceMemoryBlock* pSrcBlock;
7046  VmaDeviceMemoryBlock* pDstBlock;
7047 };
7048 
7049 class VmaDefragmentationAlgorithm;
7050 
7051 /*
7052 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
7053 Vulkan memory type.
7054 
7055 Synchronized internally with a mutex.
7056 */
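// Editorial note: a rough sketch of the allocation flow, inferred from the
// members declared below rather than quoted from the implementation:
// Allocate() first tries AllocateFromBlock() on existing blocks, which are kept
// incrementally sorted by sumFreeSize; if nothing fits and fewer than
// m_MaxBlockCount blocks exist, it calls CreateBlock() with a size derived from
// m_PreferredBlockSize.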
7057 struct VmaBlockVector
7058 {
7059  VMA_CLASS_NO_COPY(VmaBlockVector)
7060 public:
7061  VmaBlockVector(
7062  VmaAllocator hAllocator,
7063  VmaPool hParentPool,
7064  uint32_t memoryTypeIndex,
7065  VkDeviceSize preferredBlockSize,
7066  size_t minBlockCount,
7067  size_t maxBlockCount,
7068  VkDeviceSize bufferImageGranularity,
7069  uint32_t frameInUseCount,
7070  bool explicitBlockSize,
7071  uint32_t algorithm,
7072  float priority);
7073  ~VmaBlockVector();
7074 
7075  VkResult CreateMinBlocks();
7076 
7077  VmaAllocator GetAllocator() const { return m_hAllocator; }
7078  VmaPool GetParentPool() const { return m_hParentPool; }
7079  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
7080  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7081  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
7082  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
7083  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
7084  uint32_t GetAlgorithm() const { return m_Algorithm; }
7085 
7086  void GetPoolStats(VmaPoolStats* pStats);
7087 
7088  bool IsEmpty();
7089  bool IsCorruptionDetectionEnabled() const;
7090 
7091  VkResult Allocate(
7092  uint32_t currentFrameIndex,
7093  VkDeviceSize size,
7094  VkDeviceSize alignment,
7095  const VmaAllocationCreateInfo& createInfo,
7096  VmaSuballocationType suballocType,
7097  size_t allocationCount,
7098  VmaAllocation* pAllocations);
7099 
7100  void Free(const VmaAllocation hAllocation);
7101 
7102  // Adds statistics of this BlockVector to pStats.
7103  void AddStats(VmaStats* pStats);
7104 
7105 #if VMA_STATS_STRING_ENABLED
7106  void PrintDetailedMap(class VmaJsonWriter& json);
7107 #endif
7108 
7109  void MakePoolAllocationsLost(
7110  uint32_t currentFrameIndex,
7111  size_t* pLostAllocationCount);
7112  VkResult CheckCorruption();
7113 
7114  // Saves results in pCtx->res.
7115  void Defragment(
7116  class VmaBlockVectorDefragmentationContext* pCtx,
7117  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
7118  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
7119  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
7120  VkCommandBuffer commandBuffer);
7121  void DefragmentationEnd(
7122  class VmaBlockVectorDefragmentationContext* pCtx,
7123  uint32_t flags,
7124  VmaDefragmentationStats* pStats);
7125 
7126  uint32_t ProcessDefragmentations(
7127  class VmaBlockVectorDefragmentationContext *pCtx,
7128  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
7129 
7130  void CommitDefragmentations(
7131  class VmaBlockVectorDefragmentationContext *pCtx,
7132  VmaDefragmentationStats* pStats);
7133 
7134  ////////////////////////////////////////////////////////////////////////
7135  // To be used only while the m_Mutex is locked. Used during defragmentation.
7136 
7137  size_t GetBlockCount() const { return m_Blocks.size(); }
7138  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
7139  size_t CalcAllocationCount() const;
7140  bool IsBufferImageGranularityConflictPossible() const;
7141 
7142 private:
7143  friend class VmaDefragmentationAlgorithm_Generic;
7144 
7145  const VmaAllocator m_hAllocator;
7146  const VmaPool m_hParentPool;
7147  const uint32_t m_MemoryTypeIndex;
7148  const VkDeviceSize m_PreferredBlockSize;
7149  const size_t m_MinBlockCount;
7150  const size_t m_MaxBlockCount;
7151  const VkDeviceSize m_BufferImageGranularity;
7152  const uint32_t m_FrameInUseCount;
7153  const bool m_ExplicitBlockSize;
7154  const uint32_t m_Algorithm;
7155  const float m_Priority;
7156  VMA_RW_MUTEX m_Mutex;
7157 
7158  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
7159  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
7160  bool m_HasEmptyBlock;
7161  // Incrementally sorted by sumFreeSize, ascending.
7162  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
7163  uint32_t m_NextBlockId;
7164 
7165  VkDeviceSize CalcMaxBlockSize() const;
7166 
7167  // Finds and removes given block from vector.
7168  void Remove(VmaDeviceMemoryBlock* pBlock);
7169 
7170  // Performs single step in sorting m_Blocks. They may not be fully sorted
7171  // after this call.
7172  void IncrementallySortBlocks();
7173 
7174  VkResult AllocatePage(
7175  uint32_t currentFrameIndex,
7176  VkDeviceSize size,
7177  VkDeviceSize alignment,
7178  const VmaAllocationCreateInfo& createInfo,
7179  VmaSuballocationType suballocType,
7180  VmaAllocation* pAllocation);
7181 
7182  // To be used only without CAN_MAKE_OTHER_LOST flag.
7183  VkResult AllocateFromBlock(
7184  VmaDeviceMemoryBlock* pBlock,
7185  uint32_t currentFrameIndex,
7186  VkDeviceSize size,
7187  VkDeviceSize alignment,
7188  VmaAllocationCreateFlags allocFlags,
7189  void* pUserData,
7190  VmaSuballocationType suballocType,
7191  uint32_t strategy,
7192  VmaAllocation* pAllocation);
7193 
7194  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
7195 
7196  // Saves result to pCtx->res.
7197  void ApplyDefragmentationMovesCpu(
7198  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7199  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
7200  // Saves result to pCtx->res.
7201  void ApplyDefragmentationMovesGpu(
7202  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7203  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7204  VkCommandBuffer commandBuffer);
7205 
7206  /*
7207  Used during defragmentation. pDefragmentationStats is optional. It's in/out
7208  - updated with new data.
7209  */
7210  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7211 
7212  void UpdateHasEmptyBlock();
7213 };
7214 
7215 struct VmaPool_T
7216 {
7217  VMA_CLASS_NO_COPY(VmaPool_T)
7218 public:
7219  VmaBlockVector m_BlockVector;
7220 
7221  VmaPool_T(
7222  VmaAllocator hAllocator,
7223  const VmaPoolCreateInfo& createInfo,
7224  VkDeviceSize preferredBlockSize);
7225  ~VmaPool_T();
7226 
7227  uint32_t GetId() const { return m_Id; }
7228  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
7229 
7230  const char* GetName() const { return m_Name; }
7231  void SetName(const char* pName);
7232 
7233 #if VMA_STATS_STRING_ENABLED
7234  //void PrintDetailedMap(class VmaStringBuilder& sb);
7235 #endif
7236 
7237 private:
7238  uint32_t m_Id;
7239  char* m_Name;
7240 };
7241 
7242 /*
7243 Performs defragmentation:
7244 
7245 - Updates `pBlockVector->m_pMetadata`.
7246 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7247 - Does not move actual data, only returns requested moves as `moves`.
7248 */
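// Editorial note: the algorithm is deliberately separated from execution.
// Defragment() only fills `moves` with (srcBlock, srcOffset) -> (dstBlock,
// dstOffset) records; the owning VmaBlockVector later performs the copies via
// ApplyDefragmentationMovesCpu() (memcpy through mapped pointers) or
// ApplyDefragmentationMovesGpu() (vkCmdCopyBuffer), declared earlier in this file.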
7249 class VmaDefragmentationAlgorithm
7250 {
7251  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7252 public:
7253  VmaDefragmentationAlgorithm(
7254  VmaAllocator hAllocator,
7255  VmaBlockVector* pBlockVector,
7256  uint32_t currentFrameIndex) :
7257  m_hAllocator(hAllocator),
7258  m_pBlockVector(pBlockVector),
7259  m_CurrentFrameIndex(currentFrameIndex)
7260  {
7261  }
7262  virtual ~VmaDefragmentationAlgorithm()
7263  {
7264  }
7265 
7266  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7267  virtual void AddAll() = 0;
7268 
7269  virtual VkResult Defragment(
7270  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7271  VkDeviceSize maxBytesToMove,
7272  uint32_t maxAllocationsToMove,
7273  VmaDefragmentationFlags flags) = 0;
7274 
7275  virtual VkDeviceSize GetBytesMoved() const = 0;
7276  virtual uint32_t GetAllocationsMoved() const = 0;
7277 
7278 protected:
7279  VmaAllocator const m_hAllocator;
7280  VmaBlockVector* const m_pBlockVector;
7281  const uint32_t m_CurrentFrameIndex;
7282 
7283  struct AllocationInfo
7284  {
7285  VmaAllocation m_hAllocation;
7286  VkBool32* m_pChanged;
7287 
7288  AllocationInfo() :
7289  m_hAllocation(VK_NULL_HANDLE),
7290  m_pChanged(VMA_NULL)
7291  {
7292  }
7293  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7294  m_hAllocation(hAlloc),
7295  m_pChanged(pChanged)
7296  {
7297  }
7298  };
7299 };
7300 
7301 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7302 {
7303  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7304 public:
7305  VmaDefragmentationAlgorithm_Generic(
7306  VmaAllocator hAllocator,
7307  VmaBlockVector* pBlockVector,
7308  uint32_t currentFrameIndex,
7309  bool overlappingMoveSupported);
7310  virtual ~VmaDefragmentationAlgorithm_Generic();
7311 
7312  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7313  virtual void AddAll() { m_AllAllocations = true; }
7314 
7315  virtual VkResult Defragment(
7316  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7317  VkDeviceSize maxBytesToMove,
7318  uint32_t maxAllocationsToMove,
7319  VmaDefragmentationFlags flags);
7320 
7321  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7322  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7323 
7324 private:
7325  uint32_t m_AllocationCount;
7326  bool m_AllAllocations;
7327 
7328  VkDeviceSize m_BytesMoved;
7329  uint32_t m_AllocationsMoved;
7330 
7331  struct AllocationInfoSizeGreater
7332  {
7333  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7334  {
7335  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7336  }
7337  };
7338 
7339  struct AllocationInfoOffsetGreater
7340  {
7341  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7342  {
7343  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7344  }
7345  };
7346 
7347  struct BlockInfo
7348  {
7349  size_t m_OriginalBlockIndex;
7350  VmaDeviceMemoryBlock* m_pBlock;
7351  bool m_HasNonMovableAllocations;
7352  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7353 
7354  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7355  m_OriginalBlockIndex(SIZE_MAX),
7356  m_pBlock(VMA_NULL),
7357  m_HasNonMovableAllocations(true),
7358  m_Allocations(pAllocationCallbacks)
7359  {
7360  }
7361 
7362  void CalcHasNonMovableAllocations()
7363  {
7364  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7365  const size_t defragmentAllocCount = m_Allocations.size();
7366  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7367  }
7368 
7369  void SortAllocationsBySizeDescending()
7370  {
7371  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7372  }
7373 
7374  void SortAllocationsByOffsetDescending()
7375  {
7376  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7377  }
7378  };
7379 
7380  struct BlockPointerLess
7381  {
7382  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7383  {
7384  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7385  }
7386  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7387  {
7388  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7389  }
7390  };
7391 
7392  // 1. Blocks with some non-movable allocations go first.
7393  // 2. Blocks with smaller sumFreeSize go first.
7394  struct BlockInfoCompareMoveDestination
7395  {
7396  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7397  {
7398  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7399  {
7400  return true;
7401  }
7402  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7403  {
7404  return false;
7405  }
7406  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7407  {
7408  return true;
7409  }
7410  return false;
7411  }
7412  };
7413 
7414  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7415  BlockInfoVector m_Blocks;
7416 
7417  VkResult DefragmentRound(
7418  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7419  VkDeviceSize maxBytesToMove,
7420  uint32_t maxAllocationsToMove,
7421  bool freeOldAllocations);
7422 
7423  size_t CalcBlocksWithNonMovableCount() const;
7424 
7425  static bool MoveMakesSense(
7426  size_t dstBlockIndex, VkDeviceSize dstOffset,
7427  size_t srcBlockIndex, VkDeviceSize srcOffset);
7428 };
7429 
7430 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7431 {
7432  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7433 public:
7434  VmaDefragmentationAlgorithm_Fast(
7435  VmaAllocator hAllocator,
7436  VmaBlockVector* pBlockVector,
7437  uint32_t currentFrameIndex,
7438  bool overlappingMoveSupported);
7439  virtual ~VmaDefragmentationAlgorithm_Fast();
7440 
7441  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7442  virtual void AddAll() { m_AllAllocations = true; }
7443 
7444  virtual VkResult Defragment(
7445  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7446  VkDeviceSize maxBytesToMove,
7447  uint32_t maxAllocationsToMove,
7448  VmaDefragmentationFlags flags);
7449 
7450  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7451  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7452 
7453 private:
7454  struct BlockInfo
7455  {
7456  size_t origBlockIndex;
7457  };
7458 
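 // Editorial note: FreeSpaceDatabase is a tiny fixed-size cache (MAX_COUNT = 4)
 // of known free ranges in destination blocks. Register() remembers a range,
 // when full replacing the smallest entry that is smaller than the new one;
 // Fetch() returns the entry leaving the most space after the aligned request
 // and then shrinks or invalidates that entry.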
7459  class FreeSpaceDatabase
7460  {
7461  public:
7462  FreeSpaceDatabase()
7463  {
7464  FreeSpace s = {};
7465  s.blockInfoIndex = SIZE_MAX;
7466  for(size_t i = 0; i < MAX_COUNT; ++i)
7467  {
7468  m_FreeSpaces[i] = s;
7469  }
7470  }
7471 
7472  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7473  {
7474  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7475  {
7476  return;
7477  }
7478 
7479  // Find the first unused slot, or else the smallest entry that is smaller than the new range (and will be replaced by it).
7480  size_t bestIndex = SIZE_MAX;
7481  for(size_t i = 0; i < MAX_COUNT; ++i)
7482  {
7483  // Empty structure.
7484  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7485  {
7486  bestIndex = i;
7487  break;
7488  }
7489  if(m_FreeSpaces[i].size < size &&
7490  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7491  {
7492  bestIndex = i;
7493  }
7494  }
7495 
7496  if(bestIndex != SIZE_MAX)
7497  {
7498  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7499  m_FreeSpaces[bestIndex].offset = offset;
7500  m_FreeSpaces[bestIndex].size = size;
7501  }
7502  }
7503 
7504  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7505  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7506  {
7507  size_t bestIndex = SIZE_MAX;
7508  VkDeviceSize bestFreeSpaceAfter = 0;
7509  for(size_t i = 0; i < MAX_COUNT; ++i)
7510  {
7511  // Structure is valid.
7512  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7513  {
7514  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7515  // Allocation fits into this structure.
7516  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7517  {
7518  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7519  (dstOffset + size);
7520  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7521  {
7522  bestIndex = i;
7523  bestFreeSpaceAfter = freeSpaceAfter;
7524  }
7525  }
7526  }
7527  }
7528 
7529  if(bestIndex != SIZE_MAX)
7530  {
7531  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7532  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7533 
7534  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7535  {
7536  // Leave this structure for remaining empty space.
7537  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7538  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7539  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7540  }
7541  else
7542  {
7543  // This structure becomes invalid.
7544  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7545  }
7546 
7547  return true;
7548  }
7549 
7550  return false;
7551  }
7552 
7553  private:
7554  static const size_t MAX_COUNT = 4;
7555 
7556  struct FreeSpace
7557  {
7558  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7559  VkDeviceSize offset;
7560  VkDeviceSize size;
7561  } m_FreeSpaces[MAX_COUNT];
7562  };
7563 
7564  const bool m_OverlappingMoveSupported;
7565 
7566  uint32_t m_AllocationCount;
7567  bool m_AllAllocations;
7568 
7569  VkDeviceSize m_BytesMoved;
7570  uint32_t m_AllocationsMoved;
7571 
7572  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7573 
7574  void PreprocessMetadata();
7575  void PostprocessMetadata();
7576  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7577 };
7578 
7579 struct VmaBlockDefragmentationContext
7580 {
7581  enum BLOCK_FLAG
7582  {
7583  BLOCK_FLAG_USED = 0x00000001,
7584  };
7585  uint32_t flags;
7586  VkBuffer hBuffer;
7587 };
7588 
7589 class VmaBlockVectorDefragmentationContext
7590 {
7591  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7592 public:
7593  VkResult res;
7594  bool mutexLocked;
7595  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7596  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7597  uint32_t defragmentationMovesProcessed;
7598  uint32_t defragmentationMovesCommitted;
7599  bool hasDefragmentationPlan;
7600 
7601  VmaBlockVectorDefragmentationContext(
7602  VmaAllocator hAllocator,
7603  VmaPool hCustomPool, // Optional.
7604  VmaBlockVector* pBlockVector,
7605  uint32_t currFrameIndex);
7606  ~VmaBlockVectorDefragmentationContext();
7607 
7608  VmaPool GetCustomPool() const { return m_hCustomPool; }
7609  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7610  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7611 
7612  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7613  void AddAll() { m_AllAllocations = true; }
7614 
7615  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7616 
7617 private:
7618  const VmaAllocator m_hAllocator;
7619  // Null if not from custom pool.
7620  const VmaPool m_hCustomPool;
7621  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7622  VmaBlockVector* const m_pBlockVector;
7623  const uint32_t m_CurrFrameIndex;
7624  // Owner of this object.
7625  VmaDefragmentationAlgorithm* m_pAlgorithm;
7626 
7627  struct AllocInfo
7628  {
7629  VmaAllocation hAlloc;
7630  VkBool32* pChanged;
7631  };
7632  // Used between constructor and Begin.
7633  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7634  bool m_AllAllocations;
7635 };
7636 
7637 struct VmaDefragmentationContext_T
7638 {
7639 private:
7640  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7641 public:
7642  VmaDefragmentationContext_T(
7643  VmaAllocator hAllocator,
7644  uint32_t currFrameIndex,
7645  uint32_t flags,
7646  VmaDefragmentationStats* pStats);
7647  ~VmaDefragmentationContext_T();
7648 
7649  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7650  void AddAllocations(
7651  uint32_t allocationCount,
7652  const VmaAllocation* pAllocations,
7653  VkBool32* pAllocationsChanged);
7654 
7655  /*
7656  Returns:
7657  - `VK_SUCCESS` if it succeeded and the object can be destroyed immediately.
7658  - `VK_NOT_READY` if it succeeded but the object must remain alive until vmaDefragmentationEnd().
7659  - Negative value if an error occurred and the object can be destroyed immediately.
7660  */
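 // Editorial sketch (not part of the original source): how a caller of the
 // public API that wraps this method typically reacts to these results.
 //
 //     VmaDefragmentationContext ctx = VK_NULL_HANDLE;
 //     VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
 //     if(res == VK_NOT_READY)
 //     {
 //         // GPU moves were recorded into info.commandBuffer: submit it,
 //         // wait for completion, then release the context.
 //         res = vmaDefragmentationEnd(allocator, ctx);
 //     }
 //     // res == VK_SUCCESS: defragmentation finished on the spot.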
7661  VkResult Defragment(
7662  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7663  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7664  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7665 
7666  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7667  VkResult DefragmentPassEnd();
7668 
7669 private:
7670  const VmaAllocator m_hAllocator;
7671  const uint32_t m_CurrFrameIndex;
7672  const uint32_t m_Flags;
7673  VmaDefragmentationStats* const m_pStats;
7674 
7675  VkDeviceSize m_MaxCpuBytesToMove;
7676  uint32_t m_MaxCpuAllocationsToMove;
7677  VkDeviceSize m_MaxGpuBytesToMove;
7678  uint32_t m_MaxGpuAllocationsToMove;
7679 
7680  // Owner of these objects.
7681  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7682  // Owner of these objects.
7683  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7684 };
7685 
7686 #if VMA_RECORDING_ENABLED
7687 
7688 class VmaRecorder
7689 {
7690 public:
7691  VmaRecorder();
7692  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7693  void WriteConfiguration(
7694  const VkPhysicalDeviceProperties& devProps,
7695  const VkPhysicalDeviceMemoryProperties& memProps,
7696  uint32_t vulkanApiVersion,
7697  bool dedicatedAllocationExtensionEnabled,
7698  bool bindMemory2ExtensionEnabled,
7699  bool memoryBudgetExtensionEnabled,
7700  bool deviceCoherentMemoryExtensionEnabled);
7701  ~VmaRecorder();
7702 
7703  void RecordCreateAllocator(uint32_t frameIndex);
7704  void RecordDestroyAllocator(uint32_t frameIndex);
7705  void RecordCreatePool(uint32_t frameIndex,
7706  const VmaPoolCreateInfo& createInfo,
7707  VmaPool pool);
7708  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7709  void RecordAllocateMemory(uint32_t frameIndex,
7710  const VkMemoryRequirements& vkMemReq,
7711  const VmaAllocationCreateInfo& createInfo,
7712  VmaAllocation allocation);
7713  void RecordAllocateMemoryPages(uint32_t frameIndex,
7714  const VkMemoryRequirements& vkMemReq,
7715  const VmaAllocationCreateInfo& createInfo,
7716  uint64_t allocationCount,
7717  const VmaAllocation* pAllocations);
7718  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7719  const VkMemoryRequirements& vkMemReq,
7720  bool requiresDedicatedAllocation,
7721  bool prefersDedicatedAllocation,
7722  const VmaAllocationCreateInfo& createInfo,
7723  VmaAllocation allocation);
7724  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7725  const VkMemoryRequirements& vkMemReq,
7726  bool requiresDedicatedAllocation,
7727  bool prefersDedicatedAllocation,
7728  const VmaAllocationCreateInfo& createInfo,
7729  VmaAllocation allocation);
7730  void RecordFreeMemory(uint32_t frameIndex,
7731  VmaAllocation allocation);
7732  void RecordFreeMemoryPages(uint32_t frameIndex,
7733  uint64_t allocationCount,
7734  const VmaAllocation* pAllocations);
7735  void RecordSetAllocationUserData(uint32_t frameIndex,
7736  VmaAllocation allocation,
7737  const void* pUserData);
7738  void RecordCreateLostAllocation(uint32_t frameIndex,
7739  VmaAllocation allocation);
7740  void RecordMapMemory(uint32_t frameIndex,
7741  VmaAllocation allocation);
7742  void RecordUnmapMemory(uint32_t frameIndex,
7743  VmaAllocation allocation);
7744  void RecordFlushAllocation(uint32_t frameIndex,
7745  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7746  void RecordInvalidateAllocation(uint32_t frameIndex,
7747  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7748  void RecordCreateBuffer(uint32_t frameIndex,
7749  const VkBufferCreateInfo& bufCreateInfo,
7750  const VmaAllocationCreateInfo& allocCreateInfo,
7751  VmaAllocation allocation);
7752  void RecordCreateImage(uint32_t frameIndex,
7753  const VkImageCreateInfo& imageCreateInfo,
7754  const VmaAllocationCreateInfo& allocCreateInfo,
7755  VmaAllocation allocation);
7756  void RecordDestroyBuffer(uint32_t frameIndex,
7757  VmaAllocation allocation);
7758  void RecordDestroyImage(uint32_t frameIndex,
7759  VmaAllocation allocation);
7760  void RecordTouchAllocation(uint32_t frameIndex,
7761  VmaAllocation allocation);
7762  void RecordGetAllocationInfo(uint32_t frameIndex,
7763  VmaAllocation allocation);
7764  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7765  VmaPool pool);
7766  void RecordDefragmentationBegin(uint32_t frameIndex,
7767  const VmaDefragmentationInfo2& info,
7768  VmaDefragmentationContext ctx);
7769  void RecordDefragmentationEnd(uint32_t frameIndex,
7770  VmaDefragmentationContext ctx);
7772  VmaPool pool,
7773  const char* name);
7774 
7775 private:
7776  struct CallParams
7777  {
7778  uint32_t threadId;
7779  double time;
7780  };
7781 
7782  class UserDataString
7783  {
7784  public:
7785  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7786  const char* GetString() const { return m_Str; }
7787 
7788  private:
7789  char m_PtrStr[17];
7790  const char* m_Str;
7791  };
7792 
7793  bool m_UseMutex;
7794  VmaRecordFlags m_Flags;
7795  FILE* m_File;
7796  VMA_MUTEX m_FileMutex;
7797  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
7798 
7799  void GetBasicParams(CallParams& outParams);
7800 
7801  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7802  template<typename T>
7803  void PrintPointerList(uint64_t count, const T* pItems)
7804  {
7805  if(count)
7806  {
7807  fprintf(m_File, "%p", pItems[0]);
7808  for(uint64_t i = 1; i < count; ++i)
7809  {
7810  fprintf(m_File, " %p", pItems[i]);
7811  }
7812  }
7813  }
7814 
7815  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7816  void Flush();
7817 };
7818 
7819 #endif // #if VMA_RECORDING_ENABLED
7820 
7821 /*
7822 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7823 */
7824 class VmaAllocationObjectAllocator
7825 {
7826  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7827 public:
7828  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7829 
7830  template<typename... Types> VmaAllocation Allocate(Types... args);
7831  void Free(VmaAllocation hAlloc);
7832 
7833 private:
7834  VMA_MUTEX m_Mutex;
7835  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7836 };
7837 
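// Editorial note: VmaCurrentBudgetData tracks, per memory heap, the bytes held
// in VkDeviceMemory blocks (m_BlockBytes) and the bytes actually suballocated
// from them (m_AllocationBytes). With VK_EXT_memory_budget it additionally
// caches the usage/budget values last fetched from the driver together with a
// counter of operations since that fetch, which drives cache refresh.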
7838 struct VmaCurrentBudgetData
7839 {
7840  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7841  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7842 
7843 #if VMA_MEMORY_BUDGET
7844  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7845  VMA_RW_MUTEX m_BudgetMutex;
7846  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7847  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7848  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7849 #endif // #if VMA_MEMORY_BUDGET
7850 
7851  VmaCurrentBudgetData()
7852  {
7853  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7854  {
7855  m_BlockBytes[heapIndex] = 0;
7856  m_AllocationBytes[heapIndex] = 0;
7857 #if VMA_MEMORY_BUDGET
7858  m_VulkanUsage[heapIndex] = 0;
7859  m_VulkanBudget[heapIndex] = 0;
7860  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7861 #endif
7862  }
7863 
7864 #if VMA_MEMORY_BUDGET
7865  m_OperationsSinceBudgetFetch = 0;
7866 #endif
7867  }
7868 
7869  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7870  {
7871  m_AllocationBytes[heapIndex] += allocationSize;
7872 #if VMA_MEMORY_BUDGET
7873  ++m_OperationsSinceBudgetFetch;
7874 #endif
7875  }
7876 
7877  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7878  {
7879  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
7880  m_AllocationBytes[heapIndex] -= allocationSize;
7881 #if VMA_MEMORY_BUDGET
7882  ++m_OperationsSinceBudgetFetch;
7883 #endif
7884  }
7885 };
7886 
7887 // Main allocator object.
7888 struct VmaAllocator_T
7889 {
7890  VMA_CLASS_NO_COPY(VmaAllocator_T)
7891 public:
7892  bool m_UseMutex;
7893  uint32_t m_VulkanApiVersion;
7894  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7895  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7896  bool m_UseExtMemoryBudget;
7897  bool m_UseAmdDeviceCoherentMemory;
7898  bool m_UseKhrBufferDeviceAddress;
7899  bool m_UseExtMemoryPriority;
7900  VkDevice m_hDevice;
7901  VkInstance m_hInstance;
7902  bool m_AllocationCallbacksSpecified;
7903  VkAllocationCallbacks m_AllocationCallbacks;
7904  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7905  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7906 
7907  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so allocations from it cannot exceed the heap size.
7908  uint32_t m_HeapSizeLimitMask;
7909 
7910  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7911  VkPhysicalDeviceMemoryProperties m_MemProps;
7912 
7913  // Default pools.
7914  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7915 
7916  // Each vector is sorted by memory (handle value).
7917  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7918  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7919  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7920 
7921  VmaCurrentBudgetData m_Budget;
7922 
7923  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7924  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7925  ~VmaAllocator_T();
7926 
7927  const VkAllocationCallbacks* GetAllocationCallbacks() const
7928  {
7929  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7930  }
7931  const VmaVulkanFunctions& GetVulkanFunctions() const
7932  {
7933  return m_VulkanFunctions;
7934  }
7935 
7936  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7937 
7938  VkDeviceSize GetBufferImageGranularity() const
7939  {
7940  return VMA_MAX(
7941  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7942  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7943  }
7944 
7945  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7946  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7947 
7948  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7949  {
7950  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7951  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7952  }
7953  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7954  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7955  {
7956  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7957  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7958  }
7959  // Minimum alignment for all allocations in specific memory type.
7960  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7961  {
7962  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7963  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7964  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7965  }
7966 
7967  bool IsIntegratedGpu() const
7968  {
7969  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7970  }
7971 
7972  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7973 
7974 #if VMA_RECORDING_ENABLED
7975  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7976 #endif
7977 
7978  void GetBufferMemoryRequirements(
7979  VkBuffer hBuffer,
7980  VkMemoryRequirements& memReq,
7981  bool& requiresDedicatedAllocation,
7982  bool& prefersDedicatedAllocation) const;
7983  void GetImageMemoryRequirements(
7984  VkImage hImage,
7985  VkMemoryRequirements& memReq,
7986  bool& requiresDedicatedAllocation,
7987  bool& prefersDedicatedAllocation) const;
7988 
7989  // Main allocation function.
7990  VkResult AllocateMemory(
7991  const VkMemoryRequirements& vkMemReq,
7992  bool requiresDedicatedAllocation,
7993  bool prefersDedicatedAllocation,
7994  VkBuffer dedicatedBuffer,
7995  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7996  VkImage dedicatedImage,
7997  const VmaAllocationCreateInfo& createInfo,
7998  VmaSuballocationType suballocType,
7999  size_t allocationCount,
8000  VmaAllocation* pAllocations);
8001 
8002  // Main deallocation function.
8003  void FreeMemory(
8004  size_t allocationCount,
8005  const VmaAllocation* pAllocations);
8006 
8007  VkResult ResizeAllocation(
8008  const VmaAllocation alloc,
8009  VkDeviceSize newSize);
8010 
8011  void CalculateStats(VmaStats* pStats);
8012 
8013  void GetBudget(
8014  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
8015 
8016 #if VMA_STATS_STRING_ENABLED
8017  void PrintDetailedMap(class VmaJsonWriter& json);
8018 #endif
8019 
8020  VkResult DefragmentationBegin(
8021  const VmaDefragmentationInfo2& info,
8022  VmaDefragmentationStats* pStats,
8023  VmaDefragmentationContext* pContext);
8024  VkResult DefragmentationEnd(
8025  VmaDefragmentationContext context);
8026 
8027  VkResult DefragmentationPassBegin(
8028  VmaDefragmentationPassInfo* pInfo,
8029  VmaDefragmentationContext context);
8030  VkResult DefragmentationPassEnd(
8031  VmaDefragmentationContext context);
8032 
8033  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
8034  bool TouchAllocation(VmaAllocation hAllocation);
8035 
8036  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
8037  void DestroyPool(VmaPool pool);
8038  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
8039 
8040  void SetCurrentFrameIndex(uint32_t frameIndex);
8041  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
8042 
8043  void MakePoolAllocationsLost(
8044  VmaPool hPool,
8045  size_t* pLostAllocationCount);
8046  VkResult CheckPoolCorruption(VmaPool hPool);
8047  VkResult CheckCorruption(uint32_t memoryTypeBits);
8048 
8049  void CreateLostAllocation(VmaAllocation* pAllocation);
8050 
8051  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
8052  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
8053  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
8054  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
8055  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
8056  VkResult BindVulkanBuffer(
8057  VkDeviceMemory memory,
8058  VkDeviceSize memoryOffset,
8059  VkBuffer buffer,
8060  const void* pNext);
8061  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
8062  VkResult BindVulkanImage(
8063  VkDeviceMemory memory,
8064  VkDeviceSize memoryOffset,
8065  VkImage image,
8066  const void* pNext);
8067 
8068  VkResult Map(VmaAllocation hAllocation, void** ppData);
8069  void Unmap(VmaAllocation hAllocation);
8070 
8071  VkResult BindBufferMemory(
8072  VmaAllocation hAllocation,
8073  VkDeviceSize allocationLocalOffset,
8074  VkBuffer hBuffer,
8075  const void* pNext);
8076  VkResult BindImageMemory(
8077  VmaAllocation hAllocation,
8078  VkDeviceSize allocationLocalOffset,
8079  VkImage hImage,
8080  const void* pNext);
8081 
8082  VkResult FlushOrInvalidateAllocation(
8083  VmaAllocation hAllocation,
8084  VkDeviceSize offset, VkDeviceSize size,
8085  VMA_CACHE_OPERATION op);
8086  VkResult FlushOrInvalidateAllocations(
8087  uint32_t allocationCount,
8088  const VmaAllocation* allocations,
8089  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
8090  VMA_CACHE_OPERATION op);
8091 
8092  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
8093 
8094  /*
8095  Returns a bit mask of memory types that can support defragmentation on GPU,
8096  as they support creation of the buffer required for copy operations.
8097  */
8098  uint32_t GetGpuDefragmentationMemoryTypeBits();
8099 
8100 private:
8101  VkDeviceSize m_PreferredLargeHeapBlockSize;
8102 
8103  VkPhysicalDevice m_PhysicalDevice;
8104  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
8105  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
8106 
8107  VMA_RW_MUTEX m_PoolsMutex;
8108  // Protected by m_PoolsMutex. Sorted by pointer value.
8109  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
8110  uint32_t m_NextPoolId;
8111 
8112  VmaVulkanFunctions m_VulkanFunctions;
8113 
8114  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
8115  uint32_t m_GlobalMemoryTypeBits;
8116 
8117 #if VMA_RECORDING_ENABLED
8118  VmaRecorder* m_pRecorder;
8119 #endif
8120 
8121  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
8122 
8123 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8124  void ImportVulkanFunctions_Static();
8125 #endif
8126 
8127  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
8128 
8129 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
8130  void ImportVulkanFunctions_Dynamic();
8131 #endif
8132 
8133  void ValidateVulkanFunctions();
8134 
8135  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
8136 
8137  VkResult AllocateMemoryOfType(
8138  VkDeviceSize size,
8139  VkDeviceSize alignment,
8140  bool dedicatedAllocation,
8141  VkBuffer dedicatedBuffer,
8142  VkBufferUsageFlags dedicatedBufferUsage,
8143  VkImage dedicatedImage,
8144  const VmaAllocationCreateInfo& createInfo,
8145  uint32_t memTypeIndex,
8146  VmaSuballocationType suballocType,
8147  size_t allocationCount,
8148  VmaAllocation* pAllocations);
8149 
8150  // Helper function only to be used inside AllocateDedicatedMemory.
8151  VkResult AllocateDedicatedMemoryPage(
8152  VkDeviceSize size,
8153  VmaSuballocationType suballocType,
8154  uint32_t memTypeIndex,
8155  const VkMemoryAllocateInfo& allocInfo,
8156  bool map,
8157  bool isUserDataString,
8158  void* pUserData,
8159  VmaAllocation* pAllocation);
8160 
8161  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
8162  VkResult AllocateDedicatedMemory(
8163  VkDeviceSize size,
8164  VmaSuballocationType suballocType,
8165  uint32_t memTypeIndex,
8166  bool withinBudget,
8167  bool map,
8168  bool isUserDataString,
8169  void* pUserData,
8170  float priority,
8171  VkBuffer dedicatedBuffer,
8172  VkBufferUsageFlags dedicatedBufferUsage,
8173  VkImage dedicatedImage,
8174  size_t allocationCount,
8175  VmaAllocation* pAllocations);
8176 
8177  void FreeDedicatedMemory(const VmaAllocation allocation);
8178 
8179  /*
8180  Calculates and returns a bit mask of memory types that can support defragmentation
8181  on GPU, as they support creation of the buffer required for copy operations.
8182  */
8183  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
8184 
8185  uint32_t CalculateGlobalMemoryTypeBits() const;
8186 
8187  bool GetFlushOrInvalidateRange(
8188  VmaAllocation allocation,
8189  VkDeviceSize offset, VkDeviceSize size,
8190  VkMappedMemoryRange& outRange) const;
8191 
8192 #if VMA_MEMORY_BUDGET
8193  void UpdateVulkanBudget();
8194 #endif // #if VMA_MEMORY_BUDGET
8195 };
8196 
8197 ////////////////////////////////////////////////////////////////////////////////
8198 // Memory allocation #2 after VmaAllocator_T definition
8199 
8200 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
8201 {
8202  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
8203 }
8204 
8205 static void VmaFree(VmaAllocator hAllocator, void* ptr)
8206 {
8207  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
8208 }
8209 
8210 template<typename T>
8211 static T* VmaAllocate(VmaAllocator hAllocator)
8212 {
8213  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
8214 }
8215 
8216 template<typename T>
8217 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
8218 {
8219  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
8220 }
8221 
8222 template<typename T>
8223 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8224 {
8225  if(ptr != VMA_NULL)
8226  {
8227  ptr->~T();
8228  VmaFree(hAllocator, ptr);
8229  }
8230 }
8231 
8232 template<typename T>
8233 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
8234 {
8235  if(ptr != VMA_NULL)
8236  {
8237  for(size_t i = count; i--; )
8238  ptr[i].~T();
8239  VmaFree(hAllocator, ptr);
8240  }
8241 }
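// Editorial note: vma_delete and vma_delete_array mirror operator delete for
// storage obtained through VmaMalloc: they run destructors explicitly (in
// reverse order for arrays) and then return the raw memory via VmaFree,
// because the objects are constructed in place rather than allocated with new.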
8242 
8243 ////////////////////////////////////////////////////////////////////////////////
8244 // VmaStringBuilder
8245 
8246 #if VMA_STATS_STRING_ENABLED
8247 
8248 class VmaStringBuilder
8249 {
8250 public:
8251  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8252  size_t GetLength() const { return m_Data.size(); }
8253  const char* GetData() const { return m_Data.data(); }
8254 
8255  void Add(char ch) { m_Data.push_back(ch); }
8256  void Add(const char* pStr);
8257  void AddNewLine() { Add('\n'); }
8258  void AddNumber(uint32_t num);
8259  void AddNumber(uint64_t num);
8260  void AddPointer(const void* ptr);
8261 
8262 private:
8263  VmaVector< char, VmaStlAllocator<char> > m_Data;
8264 };
8265 
8266 void VmaStringBuilder::Add(const char* pStr)
8267 {
8268  const size_t strLen = strlen(pStr);
8269  if(strLen > 0)
8270  {
8271  const size_t oldCount = m_Data.size();
8272  m_Data.resize(oldCount + strLen);
8273  memcpy(m_Data.data() + oldCount, pStr, strLen);
8274  }
8275 }
8276 
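// Editorial note: both AddNumber overloads convert back-to-front, writing the
// least significant digit first into the end of a stack buffer sized for the
// worst case (10 digits for uint32_t, 20 for uint64_t, plus the terminator).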
8277 void VmaStringBuilder::AddNumber(uint32_t num)
8278 {
8279  char buf[11];
8280  buf[10] = '\0';
8281  char *p = &buf[10];
8282  do
8283  {
8284  *--p = '0' + (num % 10);
8285  num /= 10;
8286  }
8287  while(num);
8288  Add(p);
8289 }
8290 
8291 void VmaStringBuilder::AddNumber(uint64_t num)
8292 {
8293  char buf[21];
8294  buf[20] = '\0';
8295  char *p = &buf[20];
8296  do
8297  {
8298  *--p = '0' + (num % 10);
8299  num /= 10;
8300  }
8301  while(num);
8302  Add(p);
8303 }
8304 
8305 void VmaStringBuilder::AddPointer(const void* ptr)
8306 {
8307  char buf[21];
8308  VmaPtrToStr(buf, sizeof(buf), ptr);
8309  Add(buf);
8310 }
8311 
8312 #endif // #if VMA_STATS_STRING_ENABLED
8313 
8314 ////////////////////////////////////////////////////////////////////////////////
8315 // VmaJsonWriter
8316 
8317 #if VMA_STATS_STRING_ENABLED
8318 
8319 class VmaJsonWriter
8320 {
8321  VMA_CLASS_NO_COPY(VmaJsonWriter)
8322 public:
8323  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8324  ~VmaJsonWriter();
8325 
8326  void BeginObject(bool singleLine = false);
8327  void EndObject();
8328 
8329  void BeginArray(bool singleLine = false);
8330  void EndArray();
8331 
8332  void WriteString(const char* pStr);
8333  void BeginString(const char* pStr = VMA_NULL);
8334  void ContinueString(const char* pStr);
8335  void ContinueString(uint32_t n);
8336  void ContinueString(uint64_t n);
8337  void ContinueString_Pointer(const void* ptr);
8338  void EndString(const char* pStr = VMA_NULL);
8339 
8340  void WriteNumber(uint32_t n);
8341  void WriteNumber(uint64_t n);
8342  void WriteBool(bool b);
8343  void WriteNull();
8344 
8345 private:
8346  static const char* const INDENT;
8347 
8348  enum COLLECTION_TYPE
8349  {
8350  COLLECTION_TYPE_OBJECT,
8351  COLLECTION_TYPE_ARRAY,
8352  };
8353  struct StackItem
8354  {
8355  COLLECTION_TYPE type;
8356  uint32_t valueCount;
8357  bool singleLineMode;
8358  };
8359 
8360  VmaStringBuilder& m_SB;
8361  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8362  bool m_InsideString;
8363 
8364  void BeginValue(bool isString);
8365  void WriteIndent(bool oneLess = false);
8366 };
8367 
8368 const char* const VmaJsonWriter::INDENT = " ";
8369 
8370 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8371  m_SB(sb),
8372  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8373  m_InsideString(false)
8374 {
8375 }
8376 
8377 VmaJsonWriter::~VmaJsonWriter()
8378 {
8379  VMA_ASSERT(!m_InsideString);
8380  VMA_ASSERT(m_Stack.empty());
8381 }
8382 
8383 void VmaJsonWriter::BeginObject(bool singleLine)
8384 {
8385  VMA_ASSERT(!m_InsideString);
8386 
8387  BeginValue(false);
8388  m_SB.Add('{');
8389 
8390  StackItem item;
8391  item.type = COLLECTION_TYPE_OBJECT;
8392  item.valueCount = 0;
8393  item.singleLineMode = singleLine;
8394  m_Stack.push_back(item);
8395 }
8396 
8397 void VmaJsonWriter::EndObject()
8398 {
8399  VMA_ASSERT(!m_InsideString);
8400 
8401  WriteIndent(true);
8402  m_SB.Add('}');
8403 
8404  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8405  m_Stack.pop_back();
8406 }
8407 
8408 void VmaJsonWriter::BeginArray(bool singleLine)
8409 {
8410  VMA_ASSERT(!m_InsideString);
8411 
8412  BeginValue(false);
8413  m_SB.Add('[');
8414 
8415  StackItem item;
8416  item.type = COLLECTION_TYPE_ARRAY;
8417  item.valueCount = 0;
8418  item.singleLineMode = singleLine;
8419  m_Stack.push_back(item);
8420 }
8421 
8422 void VmaJsonWriter::EndArray()
8423 {
8424  VMA_ASSERT(!m_InsideString);
8425 
8426  WriteIndent(true);
8427  m_SB.Add(']');
8428 
8429  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8430  m_Stack.pop_back();
8431 }
8432 
8433 void VmaJsonWriter::WriteString(const char* pStr)
8434 {
8435  BeginString(pStr);
8436  EndString();
8437 }
8438 
8439 void VmaJsonWriter::BeginString(const char* pStr)
8440 {
8441  VMA_ASSERT(!m_InsideString);
8442 
8443  BeginValue(true);
8444  m_SB.Add('"');
8445  m_InsideString = true;
8446  if(pStr != VMA_NULL && pStr[0] != '\0')
8447  {
8448  ContinueString(pStr);
8449  }
8450 }
8451 
8452 void VmaJsonWriter::ContinueString(const char* pStr)
8453 {
8454  VMA_ASSERT(m_InsideString);
8455 
8456  const size_t strLen = strlen(pStr);
8457  for(size_t i = 0; i < strLen; ++i)
8458  {
8459  char ch = pStr[i];
8460  if(ch == '\\')
8461  {
8462  m_SB.Add("\\\\");
8463  }
8464  else if(ch == '"')
8465  {
8466  m_SB.Add("\\\"");
8467  }
8468  else if(ch >= 32)
8469  {
8470  m_SB.Add(ch);
8471  }
8472  else switch(ch)
8473  {
8474  case '\b':
8475  m_SB.Add("\\b");
8476  break;
8477  case '\f':
8478  m_SB.Add("\\f");
8479  break;
8480  case '\n':
8481  m_SB.Add("\\n");
8482  break;
8483  case '\r':
8484  m_SB.Add("\\r");
8485  break;
8486  case '\t':
8487  m_SB.Add("\\t");
8488  break;
8489  default:
8490  VMA_ASSERT(0 && "Character not currently supported.");
8491  break;
8492  }
8493  }
8494 }
8495 
8496 void VmaJsonWriter::ContinueString(uint32_t n)
8497 {
8498  VMA_ASSERT(m_InsideString);
8499  m_SB.AddNumber(n);
8500 }
8501 
8502 void VmaJsonWriter::ContinueString(uint64_t n)
8503 {
8504  VMA_ASSERT(m_InsideString);
8505  m_SB.AddNumber(n);
8506 }
8507 
8508 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8509 {
8510  VMA_ASSERT(m_InsideString);
8511  m_SB.AddPointer(ptr);
8512 }
8513 
8514 void VmaJsonWriter::EndString(const char* pStr)
8515 {
8516  VMA_ASSERT(m_InsideString);
8517  if(pStr != VMA_NULL && pStr[0] != '\0')
8518  {
8519  ContinueString(pStr);
8520  }
8521  m_SB.Add('"');
8522  m_InsideString = false;
8523 }
8524 
8525 void VmaJsonWriter::WriteNumber(uint32_t n)
8526 {
8527  VMA_ASSERT(!m_InsideString);
8528  BeginValue(false);
8529  m_SB.AddNumber(n);
8530 }
8531 
8532 void VmaJsonWriter::WriteNumber(uint64_t n)
8533 {
8534  VMA_ASSERT(!m_InsideString);
8535  BeginValue(false);
8536  m_SB.AddNumber(n);
8537 }
8538 
8539 void VmaJsonWriter::WriteBool(bool b)
8540 {
8541  VMA_ASSERT(!m_InsideString);
8542  BeginValue(false);
8543  m_SB.Add(b ? "true" : "false");
8544 }
8545 
8546 void VmaJsonWriter::WriteNull()
8547 {
8548  VMA_ASSERT(!m_InsideString);
8549  BeginValue(false);
8550  m_SB.Add("null");
8551 }
8552 
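// Editorial note: BeginValue() relies on valueCount parity inside objects: an
// even count means the next value is a key (so it must be a string), an odd
// count means it is the value belonging to the preceding key, hence ": ".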
8553 void VmaJsonWriter::BeginValue(bool isString)
8554 {
8555  if(!m_Stack.empty())
8556  {
8557  StackItem& currItem = m_Stack.back();
8558  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8559  currItem.valueCount % 2 == 0)
8560  {
8561  VMA_ASSERT(isString);
8562  }
8563 
8564  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8565  currItem.valueCount % 2 != 0)
8566  {
8567  m_SB.Add(": ");
8568  }
8569  else if(currItem.valueCount > 0)
8570  {
8571  m_SB.Add(", ");
8572  WriteIndent();
8573  }
8574  else
8575  {
8576  WriteIndent();
8577  }
8578  ++currItem.valueCount;
8579  }
8580 }
8581 
8582 void VmaJsonWriter::WriteIndent(bool oneLess)
8583 {
8584  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8585  {
8586  m_SB.AddNewLine();
8587 
8588  size_t count = m_Stack.size();
8589  if(count > 0 && oneLess)
8590  {
8591  --count;
8592  }
8593  for(size_t i = 0; i < count; ++i)
8594  {
8595  m_SB.Add(INDENT);
8596  }
8597  }
8598 }
8599 
8600 #endif // #if VMA_STATS_STRING_ENABLED
8601 
8602 ////////////////////////////////////////////////////////////////////////////////
8603 
8604 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8605 {
8606  if(IsUserDataString())
8607  {
8608  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8609 
8610  FreeUserDataString(hAllocator);
8611 
8612  if(pUserData != VMA_NULL)
8613  {
8614  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8615  }
8616  }
8617  else
8618  {
8619  m_pUserData = pUserData;
8620  }
8621 }
8622 
8623 void VmaAllocation_T::ChangeBlockAllocation(
8624  VmaAllocator hAllocator,
8625  VmaDeviceMemoryBlock* block,
8626  VkDeviceSize offset)
8627 {
8628  VMA_ASSERT(block != VMA_NULL);
8629  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8630 
8631  // Move mapping reference counter from old block to new block.
8632  if(block != m_BlockAllocation.m_Block)
8633  {
8634  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8635  if(IsPersistentMap())
8636  ++mapRefCount;
8637  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8638  block->Map(hAllocator, mapRefCount, VMA_NULL);
8639  }
8640 
8641  m_BlockAllocation.m_Block = block;
8642  m_BlockAllocation.m_Offset = offset;
8643 }
8644 
8645 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8646 {
8647  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8648  m_BlockAllocation.m_Offset = newOffset;
8649 }
8650 
8651 VkDeviceSize VmaAllocation_T::GetOffset() const
8652 {
8653  switch(m_Type)
8654  {
8655  case ALLOCATION_TYPE_BLOCK:
8656  return m_BlockAllocation.m_Offset;
8657  case ALLOCATION_TYPE_DEDICATED:
8658  return 0;
8659  default:
8660  VMA_ASSERT(0);
8661  return 0;
8662  }
8663 }
8664 
8665 VkDeviceMemory VmaAllocation_T::GetMemory() const
8666 {
8667  switch(m_Type)
8668  {
8669  case ALLOCATION_TYPE_BLOCK:
8670  return m_BlockAllocation.m_Block->GetDeviceMemory();
8671  case ALLOCATION_TYPE_DEDICATED:
8672  return m_DedicatedAllocation.m_hMemory;
8673  default:
8674  VMA_ASSERT(0);
8675  return VK_NULL_HANDLE;
8676  }
8677 }
8678 
8679 void* VmaAllocation_T::GetMappedData() const
8680 {
8681  switch(m_Type)
8682  {
8683  case ALLOCATION_TYPE_BLOCK:
8684  if(m_MapCount != 0)
8685  {
8686  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8687  VMA_ASSERT(pBlockData != VMA_NULL);
8688  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8689  }
8690  else
8691  {
8692  return VMA_NULL;
8693  }
8694  break;
8695  case ALLOCATION_TYPE_DEDICATED:
8696  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8697  return m_DedicatedAllocation.m_pMappedData;
8698  default:
8699  VMA_ASSERT(0);
8700  return VMA_NULL;
8701  }
8702 }
8703 
8704 bool VmaAllocation_T::CanBecomeLost() const
8705 {
8706  switch(m_Type)
8707  {
8708  case ALLOCATION_TYPE_BLOCK:
8709  return m_BlockAllocation.m_CanBecomeLost;
8710  case ALLOCATION_TYPE_DEDICATED:
8711  return false;
8712  default:
8713  VMA_ASSERT(0);
8714  return false;
8715  }
8716 }
8717 
8718 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8719 {
8720  VMA_ASSERT(CanBecomeLost());
8721 
8722  /*
8723  Warning: This is a carefully designed algorithm.
8724  Do not modify unless you really know what you're doing :)
8725  */
8726  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8727  for(;;)
8728  {
8729  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8730  {
8731  VMA_ASSERT(0);
8732  return false;
8733  }
8734  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8735  {
8736  return false;
8737  }
8738  else // Last use time earlier than current time.
8739  {
8740  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8741  {
8742  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8743  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8744  return true;
8745  }
8746  }
8747  }
8748 }
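The loop above is the standard lock-free compare-exchange retry pattern: read the atomic, decide based on the observed value, and publish the new value only if no other thread changed it in between. A self-contained sketch of the same idea using std::atomic directly; the function and constant names are illustrative stand-ins, and unlike the member function above it returns false instead of asserting when the allocation is already lost.

#include <atomic>
#include <cstdint>

static const uint32_t FRAME_INDEX_LOST = UINT32_MAX; // sentinel, like VMA_FRAME_INDEX_LOST

bool TryMarkLost(std::atomic<uint32_t>& lastUseFrame,
                 uint32_t currentFrame, uint32_t framesInUse)
{
    uint32_t observed = lastUseFrame.load();
    for(;;)
    {
        if(observed == FRAME_INDEX_LOST)
            return false; // another thread already marked it lost
        if(observed + framesInUse >= currentFrame)
            return false; // still potentially in use by queued GPU work
        // Publish LOST only if the index was not touched meanwhile; on failure
        // compare_exchange_weak refreshes 'observed' and the loop re-decides.
        if(lastUseFrame.compare_exchange_weak(observed, FRAME_INDEX_LOST))
            return true;
    }
}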
8749 
8750 #if VMA_STATS_STRING_ENABLED
8751 
8752 // Corresponds to values of enum VmaSuballocationType.
8753 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8754  "FREE",
8755  "UNKNOWN",
8756  "BUFFER",
8757  "IMAGE_UNKNOWN",
8758  "IMAGE_LINEAR",
8759  "IMAGE_OPTIMAL",
8760 };
8761 
8762 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8763 {
8764  json.WriteString("Type");
8765  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8766 
8767  json.WriteString("Size");
8768  json.WriteNumber(m_Size);
8769 
8770  if(m_pUserData != VMA_NULL)
8771  {
8772  json.WriteString("UserData");
8773  if(IsUserDataString())
8774  {
8775  json.WriteString((const char*)m_pUserData);
8776  }
8777  else
8778  {
8779  json.BeginString();
8780  json.ContinueString_Pointer(m_pUserData);
8781  json.EndString();
8782  }
8783  }
8784 
8785  json.WriteString("CreationFrameIndex");
8786  json.WriteNumber(m_CreationFrameIndex);
8787 
8788  json.WriteString("LastUseFrameIndex");
8789  json.WriteNumber(GetLastUseFrameIndex());
8790 
8791  if(m_BufferImageUsage != 0)
8792  {
8793  json.WriteString("Usage");
8794  json.WriteNumber(m_BufferImageUsage);
8795  }
8796 }
8797 
8798 #endif
8799 
8800 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8801 {
8802  VMA_ASSERT(IsUserDataString());
8803  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8804  m_pUserData = VMA_NULL;
8805 }
8806 
8807 void VmaAllocation_T::BlockAllocMap()
8808 {
8809  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8810 
8811  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8812  {
8813  ++m_MapCount;
8814  }
8815  else
8816  {
8817  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8818  }
8819 }
8820 
8821 void VmaAllocation_T::BlockAllocUnmap()
8822 {
8823  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8824 
8825  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8826  {
8827  --m_MapCount;
8828  }
8829  else
8830  {
8831  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8832  }
8833 }
8834 
8835 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8836 {
8837  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8838 
8839  if(m_MapCount != 0)
8840  {
8841  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8842  {
8843  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8844  *ppData = m_DedicatedAllocation.m_pMappedData;
8845  ++m_MapCount;
8846  return VK_SUCCESS;
8847  }
8848  else
8849  {
8850  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8851  return VK_ERROR_MEMORY_MAP_FAILED;
8852  }
8853  }
8854  else
8855  {
8856  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8857  hAllocator->m_hDevice,
8858  m_DedicatedAllocation.m_hMemory,
8859  0, // offset
8860  VK_WHOLE_SIZE,
8861  0, // flags
8862  ppData);
8863  if(result == VK_SUCCESS)
8864  {
8865  m_DedicatedAllocation.m_pMappedData = *ppData;
8866  m_MapCount = 1;
8867  }
8868  return result;
8869  }
8870 }
8871 
8872 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8873 {
8874  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8875 
8876  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8877  {
8878  --m_MapCount;
8879  if(m_MapCount == 0)
8880  {
8881  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8882  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8883  hAllocator->m_hDevice,
8884  m_DedicatedAllocation.m_hMemory);
8885  }
8886  }
8887  else
8888  {
8889  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8890  }
8891 }
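All four map/unmap paths above share one encoding: m_MapCount packs a persistent-map flag into the top bit (MAP_COUNT_FLAG_PERSISTENT_MAP) and a reference count of explicit mappings into the low bits, which explains the 0x7F cap being asserted. A hedged sketch of that packing; MapCount and its members are illustrative, only the flag value mirrors the checks above.

#include <cassert>
#include <cstdint>

static const uint8_t PERSISTENT_MAP = 0x80; // top bit, as MAP_COUNT_FLAG_PERSISTENT_MAP

struct MapCount
{
    uint8_t bits = 0;
    bool    IsPersistent() const { return (bits & PERSISTENT_MAP) != 0; }
    uint8_t Refs() const         { return bits & uint8_t(~PERSISTENT_MAP); }
    // The low 7 bits never overflow into the flag because of the 0x7F cap.
    void Map()   { assert(Refs() < 0x7F); ++bits; }
    void Unmap() { assert(Refs() > 0);    --bits; }
};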
8892 
8893 #if VMA_STATS_STRING_ENABLED
8894 
8895 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8896 {
8897  json.BeginObject();
8898 
8899  json.WriteString("Blocks");
8900  json.WriteNumber(stat.blockCount);
8901 
8902  json.WriteString("Allocations");
8903  json.WriteNumber(stat.allocationCount);
8904 
8905  json.WriteString("UnusedRanges");
8906  json.WriteNumber(stat.unusedRangeCount);
8907 
8908  json.WriteString("UsedBytes");
8909  json.WriteNumber(stat.usedBytes);
8910 
8911  json.WriteString("UnusedBytes");
8912  json.WriteNumber(stat.unusedBytes);
8913 
8914  if(stat.allocationCount > 1)
8915  {
8916  json.WriteString("AllocationSize");
8917  json.BeginObject(true);
8918  json.WriteString("Min");
8919  json.WriteNumber(stat.allocationSizeMin);
8920  json.WriteString("Avg");
8921  json.WriteNumber(stat.allocationSizeAvg);
8922  json.WriteString("Max");
8923  json.WriteNumber(stat.allocationSizeMax);
8924  json.EndObject();
8925  }
8926 
8927  if(stat.unusedRangeCount > 1)
8928  {
8929  json.WriteString("UnusedRangeSize");
8930  json.BeginObject(true);
8931  json.WriteString("Min");
8932  json.WriteNumber(stat.unusedRangeSizeMin);
8933  json.WriteString("Avg");
8934  json.WriteNumber(stat.unusedRangeSizeAvg);
8935  json.WriteString("Max");
8936  json.WriteNumber(stat.unusedRangeSizeMax);
8937  json.EndObject();
8938  }
8939 
8940  json.EndObject();
8941 }
8942 
8943 #endif // #if VMA_STATS_STRING_ENABLED
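For orientation, the object this function emits looks roughly like the sample below (all numbers invented for illustration). Note the two Size sub-objects are emitted only when the corresponding count exceeds 1.

{ "Blocks": 2, "Allocations": 10, "UnusedRanges": 3,
  "UsedBytes": 1048576, "UnusedBytes": 65536,
  "AllocationSize": { "Min": 256, "Avg": 104857, "Max": 524288 },
  "UnusedRangeSize": { "Min": 4096, "Avg": 21845, "Max": 32768 } }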
8944 
8945 struct VmaSuballocationItemSizeLess
8946 {
8947  bool operator()(
8948  const VmaSuballocationList::iterator lhs,
8949  const VmaSuballocationList::iterator rhs) const
8950  {
8951  return lhs->size < rhs->size;
8952  }
8953  bool operator()(
8954  const VmaSuballocationList::iterator lhs,
8955  VkDeviceSize rhsSize) const
8956  {
8957  return lhs->size < rhsSize;
8958  }
8959 };
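The comparator is deliberately overloaded: the (iterator, iterator) form drives sorted insertion, while the (iterator, VkDeviceSize) form lets the very same functor key a binary search by raw size, which is how VmaBinaryFindFirstNotLess is used below. A simplified standalone analogue with the standard library; Range, RangeList, and FindFirstNotSmaller are stand-in names.

#include <algorithm>
#include <cstdint>
#include <list>
#include <vector>

struct Range { uint64_t size; };
using RangeList = std::list<Range>;

struct RangeItSizeLess
{
    bool operator()(RangeList::iterator l, RangeList::iterator r) const { return l->size < r->size; }
    bool operator()(RangeList::iterator l, uint64_t rhsSize) const      { return l->size < rhsSize; }
};

// Returns the smallest registered range with size >= wanted, or null.
RangeList::iterator* FindFirstNotSmaller(std::vector<RangeList::iterator>& bySize, uint64_t wanted)
{
    auto it = std::lower_bound(bySize.begin(), bySize.end(), wanted, RangeItSizeLess());
    return it != bySize.end() ? &*it : nullptr;
}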
8960 
8961 
8963 // class VmaBlockMetadata
8964 
8965 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8966  m_Size(0),
8967  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8968 {
8969 }
8970 
8971 #if VMA_STATS_STRING_ENABLED
8972 
8973 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8974  VkDeviceSize unusedBytes,
8975  size_t allocationCount,
8976  size_t unusedRangeCount) const
8977 {
8978  json.BeginObject();
8979 
8980  json.WriteString("TotalBytes");
8981  json.WriteNumber(GetSize());
8982 
8983  json.WriteString("UnusedBytes");
8984  json.WriteNumber(unusedBytes);
8985 
8986  json.WriteString("Allocations");
8987  json.WriteNumber((uint64_t)allocationCount);
8988 
8989  json.WriteString("UnusedRanges");
8990  json.WriteNumber((uint64_t)unusedRangeCount);
8991 
8992  json.WriteString("Suballocations");
8993  json.BeginArray();
8994 }
8995 
8996 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8997  VkDeviceSize offset,
8998  VmaAllocation hAllocation) const
8999 {
9000  json.BeginObject(true);
9001 
9002  json.WriteString("Offset");
9003  json.WriteNumber(offset);
9004 
9005  hAllocation->PrintParameters(json);
9006 
9007  json.EndObject();
9008 }
9009 
9010 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
9011  VkDeviceSize offset,
9012  VkDeviceSize size) const
9013 {
9014  json.BeginObject(true);
9015 
9016  json.WriteString("Offset");
9017  json.WriteNumber(offset);
9018 
9019  json.WriteString("Type");
9020  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
9021 
9022  json.WriteString("Size");
9023  json.WriteNumber(size);
9024 
9025  json.EndObject();
9026 }
9027 
9028 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
9029 {
9030  json.EndArray();
9031  json.EndObject();
9032 }
9033 
9034 #endif // #if VMA_STATS_STRING_ENABLED
9035 
9037 // class VmaBlockMetadata_Generic
9038 
9039 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
9040  VmaBlockMetadata(hAllocator),
9041  m_FreeCount(0),
9042  m_SumFreeSize(0),
9043  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9044  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
9045 {
9046 }
9047 
9048 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
9049 {
9050 }
9051 
9052 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
9053 {
9054  VmaBlockMetadata::Init(size);
9055 
9056  m_FreeCount = 1;
9057  m_SumFreeSize = size;
9058 
9059  VmaSuballocation suballoc = {};
9060  suballoc.offset = 0;
9061  suballoc.size = size;
9062  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9063  suballoc.hAllocation = VK_NULL_HANDLE;
9064 
9065  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9066  m_Suballocations.push_back(suballoc);
9067  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
9068  --suballocItem;
9069  m_FreeSuballocationsBySize.push_back(suballocItem);
9070 }
9071 
9072 bool VmaBlockMetadata_Generic::Validate() const
9073 {
9074  VMA_VALIDATE(!m_Suballocations.empty());
9075 
9076  // Expected offset of new suballocation as calculated from previous ones.
9077  VkDeviceSize calculatedOffset = 0;
9078  // Expected number of free suballocations as calculated from traversing their list.
9079  uint32_t calculatedFreeCount = 0;
9080  // Expected sum size of free suballocations as calculated from traversing their list.
9081  VkDeviceSize calculatedSumFreeSize = 0;
9082  // Expected number of free suballocations that should be registered in
9083  // m_FreeSuballocationsBySize calculated from traversing their list.
9084  size_t freeSuballocationsToRegister = 0;
9085  // True if previous visited suballocation was free.
9086  bool prevFree = false;
9087 
9088  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9089  suballocItem != m_Suballocations.cend();
9090  ++suballocItem)
9091  {
9092  const VmaSuballocation& subAlloc = *suballocItem;
9093 
9094  // Actual offset of this suballocation doesn't match expected one.
9095  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
9096 
9097  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
9098  // Two adjacent free suballocations are invalid. They should be merged.
9099  VMA_VALIDATE(!prevFree || !currFree);
9100 
9101  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
9102 
9103  if(currFree)
9104  {
9105  calculatedSumFreeSize += subAlloc.size;
9106  ++calculatedFreeCount;
9107  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9108  {
9109  ++freeSuballocationsToRegister;
9110  }
9111 
9112  // Margin required between allocations - every free space must be at least that large.
9113  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
9114  }
9115  else
9116  {
9117  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
9118  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
9119 
9120  // Margin required between allocations - previous allocation must be free.
9121  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
9122  }
9123 
9124  calculatedOffset += subAlloc.size;
9125  prevFree = currFree;
9126  }
9127 
9128  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
9129  // match expected one.
9130  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
9131 
9132  VkDeviceSize lastSize = 0;
9133  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
9134  {
9135  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
9136 
9137  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
9138  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9139  // They must be sorted by size ascending.
9140  VMA_VALIDATE(suballocItem->size >= lastSize);
9141 
9142  lastSize = suballocItem->size;
9143  }
9144 
9145  // Check if totals match calculated values.
9146  VMA_VALIDATE(ValidateFreeSuballocationList());
9147  VMA_VALIDATE(calculatedOffset == GetSize());
9148  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
9149  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
9150 
9151  return true;
9152 }
9153 
9154 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
9155 {
9156  if(!m_FreeSuballocationsBySize.empty())
9157  {
9158  return m_FreeSuballocationsBySize.back()->size;
9159  }
9160  else
9161  {
9162  return 0;
9163  }
9164 }
9165 
9166 bool VmaBlockMetadata_Generic::IsEmpty() const
9167 {
9168  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
9169 }
9170 
9171 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9172 {
9173  outInfo.blockCount = 1;
9174 
9175  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9176  outInfo.allocationCount = rangeCount - m_FreeCount;
9177  outInfo.unusedRangeCount = m_FreeCount;
9178 
9179  outInfo.unusedBytes = m_SumFreeSize;
9180  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
9181 
9182  outInfo.allocationSizeMin = UINT64_MAX;
9183  outInfo.allocationSizeMax = 0;
9184  outInfo.unusedRangeSizeMin = UINT64_MAX;
9185  outInfo.unusedRangeSizeMax = 0;
9186 
9187  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9188  suballocItem != m_Suballocations.cend();
9189  ++suballocItem)
9190  {
9191  const VmaSuballocation& suballoc = *suballocItem;
9192  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9193  {
9194  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9195  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9196  }
9197  else
9198  {
9199  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
9200  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
9201  }
9202  }
9203 }
9204 
9205 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
9206 {
9207  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9208 
9209  inoutStats.size += GetSize();
9210  inoutStats.unusedSize += m_SumFreeSize;
9211  inoutStats.allocationCount += rangeCount - m_FreeCount;
9212  inoutStats.unusedRangeCount += m_FreeCount;
9213  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9214 }
9215 
9216 #if VMA_STATS_STRING_ENABLED
9217 
9218 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
9219 {
9220  PrintDetailedMap_Begin(json,
9221  m_SumFreeSize, // unusedBytes
9222  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
9223  m_FreeCount); // unusedRangeCount
9224 
9225  size_t i = 0;
9226  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9227  suballocItem != m_Suballocations.cend();
9228  ++suballocItem, ++i)
9229  {
9230  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9231  {
9232  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
9233  }
9234  else
9235  {
9236  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
9237  }
9238  }
9239 
9240  PrintDetailedMap_End(json);
9241 }
9242 
9243 #endif // #if VMA_STATS_STRING_ENABLED
9244 
9245 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9246  uint32_t currentFrameIndex,
9247  uint32_t frameInUseCount,
9248  VkDeviceSize bufferImageGranularity,
9249  VkDeviceSize allocSize,
9250  VkDeviceSize allocAlignment,
9251  bool upperAddress,
9252  VmaSuballocationType allocType,
9253  bool canMakeOtherLost,
9254  uint32_t strategy,
9255  VmaAllocationRequest* pAllocationRequest)
9256 {
9257  VMA_ASSERT(allocSize > 0);
9258  VMA_ASSERT(!upperAddress);
9259  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9260  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9261  VMA_HEAVY_ASSERT(Validate());
9262 
9263  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9264 
9265  // There is not enough total free space in this block to fulfill the request: Early return.
9266  if(canMakeOtherLost == false &&
9267  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9268  {
9269  return false;
9270  }
9271 
9272  // New algorithm, efficiently searching freeSuballocationsBySize.
9273  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9274  if(freeSuballocCount > 0)
9275  {
9276  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
9277  {
9278  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9279  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9280  m_FreeSuballocationsBySize.data(),
9281  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9282  allocSize + 2 * VMA_DEBUG_MARGIN,
9283  VmaSuballocationItemSizeLess());
9284  size_t index = it - m_FreeSuballocationsBySize.data();
9285  for(; index < freeSuballocCount; ++index)
9286  {
9287  if(CheckAllocation(
9288  currentFrameIndex,
9289  frameInUseCount,
9290  bufferImageGranularity,
9291  allocSize,
9292  allocAlignment,
9293  allocType,
9294  m_FreeSuballocationsBySize[index],
9295  false, // canMakeOtherLost
9296  &pAllocationRequest->offset,
9297  &pAllocationRequest->itemsToMakeLostCount,
9298  &pAllocationRequest->sumFreeSize,
9299  &pAllocationRequest->sumItemSize))
9300  {
9301  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9302  return true;
9303  }
9304  }
9305  }
9306  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9307  {
9308  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9309  it != m_Suballocations.end();
9310  ++it)
9311  {
9312  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9313  currentFrameIndex,
9314  frameInUseCount,
9315  bufferImageGranularity,
9316  allocSize,
9317  allocAlignment,
9318  allocType,
9319  it,
9320  false, // canMakeOtherLost
9321  &pAllocationRequest->offset,
9322  &pAllocationRequest->itemsToMakeLostCount,
9323  &pAllocationRequest->sumFreeSize,
9324  &pAllocationRequest->sumItemSize))
9325  {
9326  pAllocationRequest->item = it;
9327  return true;
9328  }
9329  }
9330  }
9331  else // WORST_FIT, FIRST_FIT
9332  {
9333  // Search starting from biggest suballocations.
9334  for(size_t index = freeSuballocCount; index--; )
9335  {
9336  if(CheckAllocation(
9337  currentFrameIndex,
9338  frameInUseCount,
9339  bufferImageGranularity,
9340  allocSize,
9341  allocAlignment,
9342  allocType,
9343  m_FreeSuballocationsBySize[index],
9344  false, // canMakeOtherLost
9345  &pAllocationRequest->offset,
9346  &pAllocationRequest->itemsToMakeLostCount,
9347  &pAllocationRequest->sumFreeSize,
9348  &pAllocationRequest->sumItemSize))
9349  {
9350  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9351  return true;
9352  }
9353  }
9354  }
9355  }
9356 
9357  if(canMakeOtherLost)
9358  {
9359  // Brute-force algorithm. TODO: Come up with something better.
9360 
9361  bool found = false;
9362  VmaAllocationRequest tmpAllocRequest = {};
9363  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9364  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9365  suballocIt != m_Suballocations.end();
9366  ++suballocIt)
9367  {
9368  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9369  suballocIt->hAllocation->CanBecomeLost())
9370  {
9371  if(CheckAllocation(
9372  currentFrameIndex,
9373  frameInUseCount,
9374  bufferImageGranularity,
9375  allocSize,
9376  allocAlignment,
9377  allocType,
9378  suballocIt,
9379  canMakeOtherLost,
9380  &tmpAllocRequest.offset,
9381  &tmpAllocRequest.itemsToMakeLostCount,
9382  &tmpAllocRequest.sumFreeSize,
9383  &tmpAllocRequest.sumItemSize))
9384  {
9385  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
9386  {
9387  *pAllocationRequest = tmpAllocRequest;
9388  pAllocationRequest->item = suballocIt;
9389  break;
9390  }
9391  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9392  {
9393  *pAllocationRequest = tmpAllocRequest;
9394  pAllocationRequest->item = suballocIt;
9395  found = true;
9396  }
9397  }
9398  }
9399  }
9400 
9401  return found;
9402  }
9403 
9404  return false;
9405 }
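The strategy value tested above originates from the allocation-create flags passed by the caller. A hedged caller-side fragment, assuming the VMA 2.x flag names:

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
// Take the first suitable free range instead of searching for the tightest
// one (best fit) or the largest one (worst fit).
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT;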
9406 
9407 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9408  uint32_t currentFrameIndex,
9409  uint32_t frameInUseCount,
9410  VmaAllocationRequest* pAllocationRequest)
9411 {
9412  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9413 
9414  while(pAllocationRequest->itemsToMakeLostCount > 0)
9415  {
9416  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9417  {
9418  ++pAllocationRequest->item;
9419  }
9420  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9421  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9422  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9423  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9424  {
9425  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9426  --pAllocationRequest->itemsToMakeLostCount;
9427  }
9428  else
9429  {
9430  return false;
9431  }
9432  }
9433 
9434  VMA_HEAVY_ASSERT(Validate());
9435  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9436  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9437 
9438  return true;
9439 }
9440 
9441 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9442 {
9443  uint32_t lostAllocationCount = 0;
9444  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9445  it != m_Suballocations.end();
9446  ++it)
9447  {
9448  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9449  it->hAllocation->CanBecomeLost() &&
9450  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9451  {
9452  it = FreeSuballocation(it);
9453  ++lostAllocationCount;
9454  }
9455  }
9456  return lostAllocationCount;
9457 }
9458 
9459 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9460 {
9461  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9462  it != m_Suballocations.end();
9463  ++it)
9464  {
9465  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
9466  {
9467  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
9468  {
9469  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9470  return VK_ERROR_VALIDATION_FAILED_EXT;
9471  }
9472  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
9473  {
9474  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9475  return VK_ERROR_VALIDATION_FAILED_EXT;
9476  }
9477  }
9478  }
9479 
9480  return VK_SUCCESS;
9481 }
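CheckCorruption assumes the debug margins around every allocation were pre-filled with a known 32-bit pattern when the allocation was created. A sketch of that guard-byte scheme with illustrative stand-ins for VmaWriteMagicValue/VmaValidateMagicValue; the magic constant is intended to match VMA's VMA_CORRUPTION_DETECTION_MAGIC_VALUE, while the margin size here is an arbitrary example.

#include <cstddef>
#include <cstdint>

static const uint32_t MAGIC  = 0x7F84E666; // VMA_CORRUPTION_DETECTION_MAGIC_VALUE
static const size_t   MARGIN = 16;         // example stand-in for VMA_DEBUG_MARGIN

static void WriteMagic(void* pBlockData, uint64_t offset)
{
    uint32_t* p = (uint32_t*)((char*)pBlockData + offset);
    for(size_t i = 0; i < MARGIN / sizeof(uint32_t); ++i)
        p[i] = MAGIC; // fill the whole margin with the pattern
}

static bool ValidateMagic(const void* pBlockData, uint64_t offset)
{
    const uint32_t* p = (const uint32_t*)((const char*)pBlockData + offset);
    for(size_t i = 0; i < MARGIN / sizeof(uint32_t); ++i)
        if(p[i] != MAGIC)
            return false; // some write strayed outside its allocation
    return true;
}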
9482 
9483 void VmaBlockMetadata_Generic::Alloc(
9484  const VmaAllocationRequest& request,
9485  VmaSuballocationType type,
9486  VkDeviceSize allocSize,
9487  VmaAllocation hAllocation)
9488 {
9489  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9490  VMA_ASSERT(request.item != m_Suballocations.end());
9491  VmaSuballocation& suballoc = *request.item;
9492  // Given suballocation is a free block.
9493  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9494  // Given offset is inside this suballocation.
9495  VMA_ASSERT(request.offset >= suballoc.offset);
9496  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9497  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9498  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9499 
9500  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9501  // it to become used.
9502  UnregisterFreeSuballocation(request.item);
9503 
9504  suballoc.offset = request.offset;
9505  suballoc.size = allocSize;
9506  suballoc.type = type;
9507  suballoc.hAllocation = hAllocation;
9508 
9509  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9510  if(paddingEnd)
9511  {
9512  VmaSuballocation paddingSuballoc = {};
9513  paddingSuballoc.offset = request.offset + allocSize;
9514  paddingSuballoc.size = paddingEnd;
9515  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9516  VmaSuballocationList::iterator next = request.item;
9517  ++next;
9518  const VmaSuballocationList::iterator paddingEndItem =
9519  m_Suballocations.insert(next, paddingSuballoc);
9520  RegisterFreeSuballocation(paddingEndItem);
9521  }
9522 
9523  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9524  if(paddingBegin)
9525  {
9526  VmaSuballocation paddingSuballoc = {};
9527  paddingSuballoc.offset = request.offset - paddingBegin;
9528  paddingSuballoc.size = paddingBegin;
9529  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9530  const VmaSuballocationList::iterator paddingBeginItem =
9531  m_Suballocations.insert(request.item, paddingSuballoc);
9532  RegisterFreeSuballocation(paddingBeginItem);
9533  }
9534 
9535  // Update totals.
9536  m_FreeCount = m_FreeCount - 1;
9537  if(paddingBegin > 0)
9538  {
9539  ++m_FreeCount;
9540  }
9541  if(paddingEnd > 0)
9542  {
9543  ++m_FreeCount;
9544  }
9545  m_SumFreeSize -= allocSize;
9546 }
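In short, Alloc carves one free range into as many as three: optional leading padding, the allocation itself, and optional trailing padding, then adjusts the free counters accordingly. A self-contained sketch of that split on a simplified list; Node is a stand-in for VmaSuballocation.

#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>

struct Node { uint64_t offset, size; bool free; };

void SplitFree(std::list<Node>& l, std::list<Node>::iterator it,
               uint64_t allocOffset, uint64_t allocSize)
{
    assert(it->free && allocOffset >= it->offset);
    const uint64_t padBegin = allocOffset - it->offset;
    assert(it->size >= padBegin + allocSize);
    const uint64_t padEnd = it->size - padBegin - allocSize;

    // Reuse the existing node for the allocation itself.
    it->offset = allocOffset;
    it->size   = allocSize;
    it->free   = false;

    // Trailing remainder becomes a free node after the allocation...
    if(padEnd > 0)
        l.insert(std::next(it), Node{allocOffset + allocSize, padEnd, true});
    // ...and the leading remainder a free node before it.
    if(padBegin > 0)
        l.insert(it, Node{allocOffset - padBegin, padBegin, true});
}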
9547 
9548 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9549 {
9550  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9551  suballocItem != m_Suballocations.end();
9552  ++suballocItem)
9553  {
9554  VmaSuballocation& suballoc = *suballocItem;
9555  if(suballoc.hAllocation == allocation)
9556  {
9557  FreeSuballocation(suballocItem);
9558  VMA_HEAVY_ASSERT(Validate());
9559  return;
9560  }
9561  }
9562  VMA_ASSERT(0 && "Not found!");
9563 }
9564 
9565 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9566 {
9567  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9568  suballocItem != m_Suballocations.end();
9569  ++suballocItem)
9570  {
9571  VmaSuballocation& suballoc = *suballocItem;
9572  if(suballoc.offset == offset)
9573  {
9574  FreeSuballocation(suballocItem);
9575  return;
9576  }
9577  }
9578  VMA_ASSERT(0 && "Not found!");
9579 }
9580 
9581 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9582 {
9583  VkDeviceSize lastSize = 0;
9584  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9585  {
9586  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9587 
9588  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9589  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9590  VMA_VALIDATE(it->size >= lastSize);
9591  lastSize = it->size;
9592  }
9593  return true;
9594 }
9595 
9596 bool VmaBlockMetadata_Generic::CheckAllocation(
9597  uint32_t currentFrameIndex,
9598  uint32_t frameInUseCount,
9599  VkDeviceSize bufferImageGranularity,
9600  VkDeviceSize allocSize,
9601  VkDeviceSize allocAlignment,
9602  VmaSuballocationType allocType,
9603  VmaSuballocationList::const_iterator suballocItem,
9604  bool canMakeOtherLost,
9605  VkDeviceSize* pOffset,
9606  size_t* itemsToMakeLostCount,
9607  VkDeviceSize* pSumFreeSize,
9608  VkDeviceSize* pSumItemSize) const
9609 {
9610  VMA_ASSERT(allocSize > 0);
9611  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9612  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9613  VMA_ASSERT(pOffset != VMA_NULL);
9614 
9615  *itemsToMakeLostCount = 0;
9616  *pSumFreeSize = 0;
9617  *pSumItemSize = 0;
9618 
9619  if(canMakeOtherLost)
9620  {
9621  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9622  {
9623  *pSumFreeSize = suballocItem->size;
9624  }
9625  else
9626  {
9627  if(suballocItem->hAllocation->CanBecomeLost() &&
9628  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9629  {
9630  ++*itemsToMakeLostCount;
9631  *pSumItemSize = suballocItem->size;
9632  }
9633  else
9634  {
9635  return false;
9636  }
9637  }
9638 
9639  // Remaining size is too small for this request: Early return.
9640  if(GetSize() - suballocItem->offset < allocSize)
9641  {
9642  return false;
9643  }
9644 
9645  // Start from offset equal to beginning of this suballocation.
9646  *pOffset = suballocItem->offset;
9647 
9648  // Apply VMA_DEBUG_MARGIN at the beginning.
9649  if(VMA_DEBUG_MARGIN > 0)
9650  {
9651  *pOffset += VMA_DEBUG_MARGIN;
9652  }
9653 
9654  // Apply alignment.
9655  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9656 
9657  // Check previous suballocations for BufferImageGranularity conflicts.
9658  // Make bigger alignment if necessary.
9659  if(bufferImageGranularity > 1)
9660  {
9661  bool bufferImageGranularityConflict = false;
9662  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9663  while(prevSuballocItem != m_Suballocations.cbegin())
9664  {
9665  --prevSuballocItem;
9666  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9667  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9668  {
9669  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9670  {
9671  bufferImageGranularityConflict = true;
9672  break;
9673  }
9674  }
9675  else
9676  // Already on previous page.
9677  break;
9678  }
9679  if(bufferImageGranularityConflict)
9680  {
9681  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9682  }
9683  }
9684 
9685  // Now that we have final *pOffset, check if we are past suballocItem.
9686  // If yes, return false - this function should be called for another suballocItem as starting point.
9687  if(*pOffset >= suballocItem->offset + suballocItem->size)
9688  {
9689  return false;
9690  }
9691 
9692  // Calculate padding at the beginning based on current offset.
9693  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9694 
9695  // Calculate required margin at the end.
9696  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9697 
9698  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9699  // Another early return check.
9700  if(suballocItem->offset + totalSize > GetSize())
9701  {
9702  return false;
9703  }
9704 
9705  // Advance lastSuballocItem until desired size is reached.
9706  // Update itemsToMakeLostCount.
9707  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9708  if(totalSize > suballocItem->size)
9709  {
9710  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9711  while(remainingSize > 0)
9712  {
9713  ++lastSuballocItem;
9714  if(lastSuballocItem == m_Suballocations.cend())
9715  {
9716  return false;
9717  }
9718  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9719  {
9720  *pSumFreeSize += lastSuballocItem->size;
9721  }
9722  else
9723  {
9724  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9725  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9726  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9727  {
9728  ++*itemsToMakeLostCount;
9729  *pSumItemSize += lastSuballocItem->size;
9730  }
9731  else
9732  {
9733  return false;
9734  }
9735  }
9736  remainingSize = (lastSuballocItem->size < remainingSize) ?
9737  remainingSize - lastSuballocItem->size : 0;
9738  }
9739  }
9740 
9741  // Check next suballocations for BufferImageGranularity conflicts.
9742  // If conflict exists, we must mark more allocations lost or fail.
9743  if(bufferImageGranularity > 1)
9744  {
9745  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9746  ++nextSuballocItem;
9747  while(nextSuballocItem != m_Suballocations.cend())
9748  {
9749  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9750  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9751  {
9752  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9753  {
9754  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9755  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9756  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9757  {
9758  ++*itemsToMakeLostCount;
9759  }
9760  else
9761  {
9762  return false;
9763  }
9764  }
9765  }
9766  else
9767  {
9768  // Already on next page.
9769  break;
9770  }
9771  ++nextSuballocItem;
9772  }
9773  }
9774  }
9775  else
9776  {
9777  const VmaSuballocation& suballoc = *suballocItem;
9778  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9779 
9780  *pSumFreeSize = suballoc.size;
9781 
9782  // Size of this suballocation is too small for this request: Early return.
9783  if(suballoc.size < allocSize)
9784  {
9785  return false;
9786  }
9787 
9788  // Start from offset equal to beginning of this suballocation.
9789  *pOffset = suballoc.offset;
9790 
9791  // Apply VMA_DEBUG_MARGIN at the beginning.
9792  if(VMA_DEBUG_MARGIN > 0)
9793  {
9794  *pOffset += VMA_DEBUG_MARGIN;
9795  }
9796 
9797  // Apply alignment.
9798  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9799 
9800  // Check previous suballocations for BufferImageGranularity conflicts.
9801  // Make bigger alignment if necessary.
9802  if(bufferImageGranularity > 1)
9803  {
9804  bool bufferImageGranularityConflict = false;
9805  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9806  while(prevSuballocItem != m_Suballocations.cbegin())
9807  {
9808  --prevSuballocItem;
9809  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9810  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9811  {
9812  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9813  {
9814  bufferImageGranularityConflict = true;
9815  break;
9816  }
9817  }
9818  else
9819  // Already on previous page.
9820  break;
9821  }
9822  if(bufferImageGranularityConflict)
9823  {
9824  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9825  }
9826  }
9827 
9828  // Calculate padding at the beginning based on current offset.
9829  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9830 
9831  // Calculate required margin at the end.
9832  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9833 
9834  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9835  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9836  {
9837  return false;
9838  }
9839 
9840  // Check next suballocations for BufferImageGranularity conflicts.
9841  // If conflict exists, allocation cannot be made here.
9842  if(bufferImageGranularity > 1)
9843  {
9844  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9845  ++nextSuballocItem;
9846  while(nextSuballocItem != m_Suballocations.cend())
9847  {
9848  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9849  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9850  {
9851  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9852  {
9853  return false;
9854  }
9855  }
9856  else
9857  {
9858  // Already on next page.
9859  break;
9860  }
9861  ++nextSuballocItem;
9862  }
9863  }
9864  }
9865 
9866  // All tests passed: Success. pOffset is already filled.
9867  return true;
9868 }
9869 
9870 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9871 {
9872  VMA_ASSERT(item != m_Suballocations.end());
9873  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9874 
9875  VmaSuballocationList::iterator nextItem = item;
9876  ++nextItem;
9877  VMA_ASSERT(nextItem != m_Suballocations.end());
9878  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9879 
9880  item->size += nextItem->size;
9881  --m_FreeCount;
9882  m_Suballocations.erase(nextItem);
9883 }
9884 
9885 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9886 {
9887  // Change this suballocation to be marked as free.
9888  VmaSuballocation& suballoc = *suballocItem;
9889  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9890  suballoc.hAllocation = VK_NULL_HANDLE;
9891 
9892  // Update totals.
9893  ++m_FreeCount;
9894  m_SumFreeSize += suballoc.size;
9895 
9896  // Merge with previous and/or next suballocation if it's also free.
9897  bool mergeWithNext = false;
9898  bool mergeWithPrev = false;
9899 
9900  VmaSuballocationList::iterator nextItem = suballocItem;
9901  ++nextItem;
9902  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9903  {
9904  mergeWithNext = true;
9905  }
9906 
9907  VmaSuballocationList::iterator prevItem = suballocItem;
9908  if(suballocItem != m_Suballocations.begin())
9909  {
9910  --prevItem;
9911  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9912  {
9913  mergeWithPrev = true;
9914  }
9915  }
9916 
9917  if(mergeWithNext)
9918  {
9919  UnregisterFreeSuballocation(nextItem);
9920  MergeFreeWithNext(suballocItem);
9921  }
9922 
9923  if(mergeWithPrev)
9924  {
9925  UnregisterFreeSuballocation(prevItem);
9926  MergeFreeWithNext(prevItem);
9927  RegisterFreeSuballocation(prevItem);
9928  return prevItem;
9929  }
9930  else
9931  {
9932  RegisterFreeSuballocation(suballocItem);
9933  return suballocItem;
9934  }
9935 }
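FreeSuballocation restores the invariant that Validate() checks: no two free ranges may stay adjacent. A compact standalone version of free-with-coalescing; Node repeats the simplified stand-in from the sketch after Alloc so this block stays self-contained.

#include <cstdint>
#include <iterator>
#include <list>

struct Node { uint64_t offset, size; bool free; };

std::list<Node>::iterator FreeAndMerge(std::list<Node>& l, std::list<Node>::iterator it)
{
    it->free = true;

    // Absorb a free successor first...
    auto next = std::next(it);
    if(next != l.end() && next->free)
    {
        it->size += next->size;
        l.erase(next);
    }
    // ...then let a free predecessor absorb the (possibly grown) range.
    if(it != l.begin())
    {
        auto prev = std::prev(it);
        if(prev->free)
        {
            prev->size += it->size;
            l.erase(it);
            return prev;
        }
    }
    return it;
}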
9936 
9937 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9938 {
9939  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9940  VMA_ASSERT(item->size > 0);
9941 
9942  // You may want to enable this validation at the beginning or at the end of
9943  // this function, depending on what you want to check.
9944  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9945 
9946  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9947  {
9948  if(m_FreeSuballocationsBySize.empty())
9949  {
9950  m_FreeSuballocationsBySize.push_back(item);
9951  }
9952  else
9953  {
9954  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9955  }
9956  }
9957 
9958  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9959 }
9960 
9961 
9962 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9963 {
9964  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9965  VMA_ASSERT(item->size > 0);
9966 
9967  // You may want to enable this validation at the beginning or at the end of
9968  // this function, depending on what you want to check.
9969  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9970 
9971  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9972  {
9973  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9974  m_FreeSuballocationsBySize.data(),
9975  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9976  item,
9977  VmaSuballocationItemSizeLess());
9978  for(size_t index = it - m_FreeSuballocationsBySize.data();
9979  index < m_FreeSuballocationsBySize.size();
9980  ++index)
9981  {
9982  if(m_FreeSuballocationsBySize[index] == item)
9983  {
9984  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9985  return;
9986  }
9987  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9988  }
9989  VMA_ASSERT(0 && "Not found.");
9990  }
9991 
9992  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9993 }
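Because many free ranges can share the same size, the code above binary-searches to the first candidate of equal size and then walks the run of ties comparing iterator identity. The same effect can be had with std::equal_range, sketched here with simplified generic types:

#include <algorithm>
#include <vector>

// 'less' must be callable both ways on (It, It), like the comparator above.
template<typename It, typename SizeLess>
bool EraseExact(std::vector<It>& bySize, It item, SizeLess less)
{
    auto range = std::equal_range(bySize.begin(), bySize.end(), item, less);
    auto found = std::find(range.first, range.second, item); // identity among equal sizes
    if(found == range.second)
        return false;
    bySize.erase(found);
    return true;
}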
9994 
9995 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9996  VkDeviceSize bufferImageGranularity,
9997  VmaSuballocationType& inOutPrevSuballocType) const
9998 {
9999  if(bufferImageGranularity == 1 || IsEmpty())
10000  {
10001  return false;
10002  }
10003 
10004  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
10005  bool typeConflictFound = false;
10006  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
10007  it != m_Suballocations.cend();
10008  ++it)
10009  {
10010  const VmaSuballocationType suballocType = it->type;
10011  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
10012  {
10013  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
10014  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
10015  {
10016  typeConflictFound = true;
10017  }
10018  inOutPrevSuballocType = suballocType;
10019  }
10020  }
10021 
10022  return typeConflictFound || minAlignment >= bufferImageGranularity;
10023 }
10024 
10026 // class VmaBlockMetadata_Linear
10027 
10028 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
10029  VmaBlockMetadata(hAllocator),
10030  m_SumFreeSize(0),
10031  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10032  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10033  m_1stVectorIndex(0),
10034  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
10035  m_1stNullItemsBeginCount(0),
10036  m_1stNullItemsMiddleCount(0),
10037  m_2ndNullItemsCount(0)
10038 {
10039 }
10040 
10041 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
10042 {
10043 }
10044 
10045 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
10046 {
10047  VmaBlockMetadata::Init(size);
10048  m_SumFreeSize = size;
10049 }
10050 
10051 bool VmaBlockMetadata_Linear::Validate() const
10052 {
10053  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10054  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10055 
10056  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
10057  VMA_VALIDATE(!suballocations1st.empty() ||
10058  suballocations2nd.empty() ||
10059  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
10060 
10061  if(!suballocations1st.empty())
10062  {
10063  // Null items at the beginning should be accounted for in m_1stNullItemsBeginCount.
10064  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
10065  // Null item at the end should be just pop_back().
10066  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
10067  }
10068  if(!suballocations2nd.empty())
10069  {
10070  // Null item at the end should be just pop_back().
10071  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
10072  }
10073 
10074  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
10075  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
10076 
10077  VkDeviceSize sumUsedSize = 0;
10078  const size_t suballoc1stCount = suballocations1st.size();
10079  VkDeviceSize offset = VMA_DEBUG_MARGIN;
10080 
10081  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10082  {
10083  const size_t suballoc2ndCount = suballocations2nd.size();
10084  size_t nullItem2ndCount = 0;
10085  for(size_t i = 0; i < suballoc2ndCount; ++i)
10086  {
10087  const VmaSuballocation& suballoc = suballocations2nd[i];
10088  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10089 
10090  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10091  VMA_VALIDATE(suballoc.offset >= offset);
10092 
10093  if(!currFree)
10094  {
10095  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10096  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10097  sumUsedSize += suballoc.size;
10098  }
10099  else
10100  {
10101  ++nullItem2ndCount;
10102  }
10103 
10104  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10105  }
10106 
10107  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10108  }
10109 
10110  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
10111  {
10112  const VmaSuballocation& suballoc = suballocations1st[i];
10113  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
10114  suballoc.hAllocation == VK_NULL_HANDLE);
10115  }
10116 
10117  size_t nullItem1stCount = m_1stNullItemsBeginCount;
10118 
10119  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
10120  {
10121  const VmaSuballocation& suballoc = suballocations1st[i];
10122  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10123 
10124  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10125  VMA_VALIDATE(suballoc.offset >= offset);
10126  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
10127 
10128  if(!currFree)
10129  {
10130  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10131  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10132  sumUsedSize += suballoc.size;
10133  }
10134  else
10135  {
10136  ++nullItem1stCount;
10137  }
10138 
10139  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10140  }
10141  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
10142 
10143  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10144  {
10145  const size_t suballoc2ndCount = suballocations2nd.size();
10146  size_t nullItem2ndCount = 0;
10147  for(size_t i = suballoc2ndCount; i--; )
10148  {
10149  const VmaSuballocation& suballoc = suballocations2nd[i];
10150  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10151 
10152  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10153  VMA_VALIDATE(suballoc.offset >= offset);
10154 
10155  if(!currFree)
10156  {
10157  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10158  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10159  sumUsedSize += suballoc.size;
10160  }
10161  else
10162  {
10163  ++nullItem2ndCount;
10164  }
10165 
10166  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10167  }
10168 
10169  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10170  }
10171 
10172  VMA_VALIDATE(offset <= GetSize());
10173  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
10174 
10175  return true;
10176 }
10177 
10178 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
10179 {
10180  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
10181  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
10182 }
10183 
10184 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
10185 {
10186  const VkDeviceSize size = GetSize();
10187 
10188  /*
10189  We don't consider gaps inside allocation vectors with freed allocations because
10190  they are not suitable for reuse in a linear allocator. We consider only space that
10191  is available for new allocations.
10192  */
10193  if(IsEmpty())
10194  {
10195  return size;
10196  }
10197 
10198  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10199 
10200  switch(m_2ndVectorMode)
10201  {
10202  case SECOND_VECTOR_EMPTY:
10203  /*
10204  Available space is after end of 1st, as well as before beginning of 1st (which
10205  would make it a ring buffer).
10206  */
10207  {
10208  const size_t suballocations1stCount = suballocations1st.size();
10209  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
10210  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10211  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
10212  return VMA_MAX(
10213  firstSuballoc.offset,
10214  size - (lastSuballoc.offset + lastSuballoc.size));
10215  }
10216  break;
10217 
10218  case SECOND_VECTOR_RING_BUFFER:
10219  /*
10220  Available space is only between end of 2nd and beginning of 1st.
10221  */
10222  {
10223  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10224  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
10225  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
10226  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
10227  }
10228  break;
10229 
10230  case SECOND_VECTOR_DOUBLE_STACK:
10231  /*
10232  Available space is only between end of 1st and top of 2nd.
10233  */
10234  {
10235  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10236  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
10237  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
10238  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
10239  }
10240  break;
10241 
10242  default:
10243  VMA_ASSERT(0);
10244  return 0;
10245  }
10246 }
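The three cases correspond to three layouts of the block; keeping the picture below in mind makes Validate() and the statistics loops that follow easier to read (a schematic only, offsets grow left to right):

SECOND_VECTOR_EMPTY:        | free | 1st allocations | free |
SECOND_VECTOR_RING_BUFFER:  | 2nd allocations (newer) | free | 1st allocations (older) |
SECOND_VECTOR_DOUBLE_STACK: | 1st allocations (bottom-up) | free | 2nd allocations (top-down) |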
10247 
10248 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10249 {
10250  const VkDeviceSize size = GetSize();
10251  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10252  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10253  const size_t suballoc1stCount = suballocations1st.size();
10254  const size_t suballoc2ndCount = suballocations2nd.size();
10255 
10256  outInfo.blockCount = 1;
10257  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10258  outInfo.unusedRangeCount = 0;
10259  outInfo.usedBytes = 0;
10260  outInfo.allocationSizeMin = UINT64_MAX;
10261  outInfo.allocationSizeMax = 0;
10262  outInfo.unusedRangeSizeMin = UINT64_MAX;
10263  outInfo.unusedRangeSizeMax = 0;
10264 
10265  VkDeviceSize lastOffset = 0;
10266 
10267  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10268  {
10269  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10270  size_t nextAlloc2ndIndex = 0;
10271  while(lastOffset < freeSpace2ndTo1stEnd)
10272  {
10273  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10274  while(nextAlloc2ndIndex < suballoc2ndCount &&
10275  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10276  {
10277  ++nextAlloc2ndIndex;
10278  }
10279 
10280  // Found non-null allocation.
10281  if(nextAlloc2ndIndex < suballoc2ndCount)
10282  {
10283  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10284 
10285  // 1. Process free space before this allocation.
10286  if(lastOffset < suballoc.offset)
10287  {
10288  // There is free space from lastOffset to suballoc.offset.
10289  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10290  ++outInfo.unusedRangeCount;
10291  outInfo.unusedBytes += unusedRangeSize;
10292  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10293  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10294  }
10295 
10296  // 2. Process this allocation.
10297  // There is allocation with suballoc.offset, suballoc.size.
10298  outInfo.usedBytes += suballoc.size;
10299  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10300  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10301 
10302  // 3. Prepare for next iteration.
10303  lastOffset = suballoc.offset + suballoc.size;
10304  ++nextAlloc2ndIndex;
10305  }
10306  // We are at the end.
10307  else
10308  {
10309  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10310  if(lastOffset < freeSpace2ndTo1stEnd)
10311  {
10312  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10313  ++outInfo.unusedRangeCount;
10314  outInfo.unusedBytes += unusedRangeSize;
10315  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10316  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10317  }
10318 
10319  // End of loop.
10320  lastOffset = freeSpace2ndTo1stEnd;
10321  }
10322  }
10323  }
10324 
10325  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10326  const VkDeviceSize freeSpace1stTo2ndEnd =
10327  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10328  while(lastOffset < freeSpace1stTo2ndEnd)
10329  {
10330  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10331  while(nextAlloc1stIndex < suballoc1stCount &&
10332  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10333  {
10334  ++nextAlloc1stIndex;
10335  }
10336 
10337  // Found non-null allocation.
10338  if(nextAlloc1stIndex < suballoc1stCount)
10339  {
10340  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10341 
10342  // 1. Process free space before this allocation.
10343  if(lastOffset < suballoc.offset)
10344  {
10345  // There is free space from lastOffset to suballoc.offset.
10346  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10347  ++outInfo.unusedRangeCount;
10348  outInfo.unusedBytes += unusedRangeSize;
10349  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10350  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10351  }
10352 
10353  // 2. Process this allocation.
10354  // There is allocation with suballoc.offset, suballoc.size.
10355  outInfo.usedBytes += suballoc.size;
10356  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10357  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10358 
10359  // 3. Prepare for next iteration.
10360  lastOffset = suballoc.offset + suballoc.size;
10361  ++nextAlloc1stIndex;
10362  }
10363  // We are at the end.
10364  else
10365  {
10366  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10367  if(lastOffset < freeSpace1stTo2ndEnd)
10368  {
10369  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10370  ++outInfo.unusedRangeCount;
10371  outInfo.unusedBytes += unusedRangeSize;
10372  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10373  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10374  }
10375 
10376  // End of loop.
10377  lastOffset = freeSpace1stTo2ndEnd;
10378  }
10379  }
10380 
10381  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10382  {
10383  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10384  while(lastOffset < size)
10385  {
10386  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10387  while(nextAlloc2ndIndex != SIZE_MAX &&
10388  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10389  {
10390  --nextAlloc2ndIndex;
10391  }
10392 
10393  // Found non-null allocation.
10394  if(nextAlloc2ndIndex != SIZE_MAX)
10395  {
10396  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10397 
10398  // 1. Process free space before this allocation.
10399  if(lastOffset < suballoc.offset)
10400  {
10401  // There is free space from lastOffset to suballoc.offset.
10402  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10403  ++outInfo.unusedRangeCount;
10404  outInfo.unusedBytes += unusedRangeSize;
10405  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10406  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10407  }
10408 
10409  // 2. Process this allocation.
10410  // There is allocation with suballoc.offset, suballoc.size.
10411  outInfo.usedBytes += suballoc.size;
10412  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10413  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10414 
10415  // 3. Prepare for next iteration.
10416  lastOffset = suballoc.offset + suballoc.size;
10417  --nextAlloc2ndIndex;
10418  }
10419  // We are at the end.
10420  else
10421  {
10422  // There is free space from lastOffset to size.
10423  if(lastOffset < size)
10424  {
10425  const VkDeviceSize unusedRangeSize = size - lastOffset;
10426  ++outInfo.unusedRangeCount;
10427  outInfo.unusedBytes += unusedRangeSize;
10428  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10429  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10430  }
10431 
10432  // End of loop.
10433  lastOffset = size;
10434  }
10435  }
10436  }
10437 
10438  outInfo.unusedBytes = size - outInfo.usedBytes;
10439 }
10440 
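// AddPoolStats below walks the block in increasing offset order, the same way as
// CalcAllocationStatInfo above. The memory layout it traverses is:
//
//   ring buffer:   | 2nd vector (wrapped) | free | 1st vector | free |
//   double stack:  | 1st vector | free | 2nd vector (grows down from the end) |
//
// Null (freed) items are skipped; each gap between consecutive live allocations
// is accumulated as one unused range. In double-stack mode the 2nd vector is
// iterated from the back, because its back() holds the lowest offset.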
10441 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10442 {
10443  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10444  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10445  const VkDeviceSize size = GetSize();
10446  const size_t suballoc1stCount = suballocations1st.size();
10447  const size_t suballoc2ndCount = suballocations2nd.size();
10448 
10449  inoutStats.size += size;
10450 
10451  VkDeviceSize lastOffset = 0;
10452 
10453  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10454  {
10455  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10456  size_t nextAlloc2ndIndex = 0;
10457  while(lastOffset < freeSpace2ndTo1stEnd)
10458  {
10459  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10460  while(nextAlloc2ndIndex < suballoc2ndCount &&
10461  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10462  {
10463  ++nextAlloc2ndIndex;
10464  }
10465 
10466  // Found non-null allocation.
10467  if(nextAlloc2ndIndex < suballoc2ndCount)
10468  {
10469  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10470 
10471  // 1. Process free space before this allocation.
10472  if(lastOffset < suballoc.offset)
10473  {
10474  // There is free space from lastOffset to suballoc.offset.
10475  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10476  inoutStats.unusedSize += unusedRangeSize;
10477  ++inoutStats.unusedRangeCount;
10478  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10479  }
10480 
10481  // 2. Process this allocation.
10482  // There is allocation with suballoc.offset, suballoc.size.
10483  ++inoutStats.allocationCount;
10484 
10485  // 3. Prepare for next iteration.
10486  lastOffset = suballoc.offset + suballoc.size;
10487  ++nextAlloc2ndIndex;
10488  }
10489  // We are at the end.
10490  else
10491  {
10492  if(lastOffset < freeSpace2ndTo1stEnd)
10493  {
10494  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10495  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10496  inoutStats.unusedSize += unusedRangeSize;
10497  ++inoutStats.unusedRangeCount;
10498  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10499  }
10500 
10501  // End of loop.
10502  lastOffset = freeSpace2ndTo1stEnd;
10503  }
10504  }
10505  }
10506 
10507  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10508  const VkDeviceSize freeSpace1stTo2ndEnd =
10509  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10510  while(lastOffset < freeSpace1stTo2ndEnd)
10511  {
10512  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10513  while(nextAlloc1stIndex < suballoc1stCount &&
10514  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10515  {
10516  ++nextAlloc1stIndex;
10517  }
10518 
10519  // Found non-null allocation.
10520  if(nextAlloc1stIndex < suballoc1stCount)
10521  {
10522  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10523 
10524  // 1. Process free space before this allocation.
10525  if(lastOffset < suballoc.offset)
10526  {
10527  // There is free space from lastOffset to suballoc.offset.
10528  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10529  inoutStats.unusedSize += unusedRangeSize;
10530  ++inoutStats.unusedRangeCount;
10531  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10532  }
10533 
10534  // 2. Process this allocation.
10535  // There is allocation with suballoc.offset, suballoc.size.
10536  ++inoutStats.allocationCount;
10537 
10538  // 3. Prepare for next iteration.
10539  lastOffset = suballoc.offset + suballoc.size;
10540  ++nextAlloc1stIndex;
10541  }
10542  // We are at the end.
10543  else
10544  {
10545  if(lastOffset < freeSpace1stTo2ndEnd)
10546  {
10547  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10548  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10549  inoutStats.unusedSize += unusedRangeSize;
10550  ++inoutStats.unusedRangeCount;
10551  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10552  }
10553 
10554  // End of loop.
10555  lastOffset = freeSpace1stTo2ndEnd;
10556  }
10557  }
10558 
10559  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10560  {
10561  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10562  while(lastOffset < size)
10563  {
10564  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10565  while(nextAlloc2ndIndex != SIZE_MAX &&
10566  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10567  {
10568  --nextAlloc2ndIndex;
10569  }
10570 
10571  // Found non-null allocation.
10572  if(nextAlloc2ndIndex != SIZE_MAX)
10573  {
10574  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10575 
10576  // 1. Process free space before this allocation.
10577  if(lastOffset < suballoc.offset)
10578  {
10579  // There is free space from lastOffset to suballoc.offset.
10580  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10581  inoutStats.unusedSize += unusedRangeSize;
10582  ++inoutStats.unusedRangeCount;
10583  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10584  }
10585 
10586  // 2. Process this allocation.
10587  // There is allocation with suballoc.offset, suballoc.size.
10588  ++inoutStats.allocationCount;
10589 
10590  // 3. Prepare for next iteration.
10591  lastOffset = suballoc.offset + suballoc.size;
10592  --nextAlloc2ndIndex;
10593  }
10594  // We are at the end.
10595  else
10596  {
10597  if(lastOffset < size)
10598  {
10599  // There is free space from lastOffset to size.
10600  const VkDeviceSize unusedRangeSize = size - lastOffset;
10601  inoutStats.unusedSize += unusedRangeSize;
10602  ++inoutStats.unusedRangeCount;
10603  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10604  }
10605 
10606  // End of loop.
10607  lastOffset = size;
10608  }
10609  }
10610  }
10611 }
10612 
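// PrintDetailedMap emits the same traversal as JSON, in two passes: the first
// pass only counts allocations and unused ranges, because PrintDetailedMap_Begin
// needs the totals up front; the second pass revisits the identical regions and
// writes one entry per allocation or unused range.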
10613 #if VMA_STATS_STRING_ENABLED
10614 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10615 {
10616  const VkDeviceSize size = GetSize();
10617  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10618  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10619  const size_t suballoc1stCount = suballocations1st.size();
10620  const size_t suballoc2ndCount = suballocations2nd.size();
10621 
10622  // FIRST PASS
10623 
10624  size_t unusedRangeCount = 0;
10625  VkDeviceSize usedBytes = 0;
10626 
10627  VkDeviceSize lastOffset = 0;
10628 
10629  size_t alloc2ndCount = 0;
10630  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10631  {
10632  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10633  size_t nextAlloc2ndIndex = 0;
10634  while(lastOffset < freeSpace2ndTo1stEnd)
10635  {
10636  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10637  while(nextAlloc2ndIndex < suballoc2ndCount &&
10638  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10639  {
10640  ++nextAlloc2ndIndex;
10641  }
10642 
10643  // Found non-null allocation.
10644  if(nextAlloc2ndIndex < suballoc2ndCount)
10645  {
10646  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10647 
10648  // 1. Process free space before this allocation.
10649  if(lastOffset < suballoc.offset)
10650  {
10651  // There is free space from lastOffset to suballoc.offset.
10652  ++unusedRangeCount;
10653  }
10654 
10655  // 2. Process this allocation.
10656  // There is allocation with suballoc.offset, suballoc.size.
10657  ++alloc2ndCount;
10658  usedBytes += suballoc.size;
10659 
10660  // 3. Prepare for next iteration.
10661  lastOffset = suballoc.offset + suballoc.size;
10662  ++nextAlloc2ndIndex;
10663  }
10664  // We are at the end.
10665  else
10666  {
10667  if(lastOffset < freeSpace2ndTo1stEnd)
10668  {
10669  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10670  ++unusedRangeCount;
10671  }
10672 
10673  // End of loop.
10674  lastOffset = freeSpace2ndTo1stEnd;
10675  }
10676  }
10677  }
10678 
10679  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10680  size_t alloc1stCount = 0;
10681  const VkDeviceSize freeSpace1stTo2ndEnd =
10682  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10683  while(lastOffset < freeSpace1stTo2ndEnd)
10684  {
10685  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10686  while(nextAlloc1stIndex < suballoc1stCount &&
10687  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10688  {
10689  ++nextAlloc1stIndex;
10690  }
10691 
10692  // Found non-null allocation.
10693  if(nextAlloc1stIndex < suballoc1stCount)
10694  {
10695  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10696 
10697  // 1. Process free space before this allocation.
10698  if(lastOffset < suballoc.offset)
10699  {
10700  // There is free space from lastOffset to suballoc.offset.
10701  ++unusedRangeCount;
10702  }
10703 
10704  // 2. Process this allocation.
10705  // There is allocation with suballoc.offset, suballoc.size.
10706  ++alloc1stCount;
10707  usedBytes += suballoc.size;
10708 
10709  // 3. Prepare for next iteration.
10710  lastOffset = suballoc.offset + suballoc.size;
10711  ++nextAlloc1stIndex;
10712  }
10713  // We are at the end.
10714  else
10715  {
10716  if(lastOffset < freeSpace1stTo2ndEnd)
10717  {
10718  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10719  ++unusedRangeCount;
10720  }
10721 
10722  // End of loop.
10723  lastOffset = freeSpace1stTo2ndEnd;
10724  }
10725  }
10726 
10727  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10728  {
10729  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10730  while(lastOffset < size)
10731  {
10732  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10733  while(nextAlloc2ndIndex != SIZE_MAX &&
10734  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10735  {
10736  --nextAlloc2ndIndex;
10737  }
10738 
10739  // Found non-null allocation.
10740  if(nextAlloc2ndIndex != SIZE_MAX)
10741  {
10742  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10743 
10744  // 1. Process free space before this allocation.
10745  if(lastOffset < suballoc.offset)
10746  {
10747  // There is free space from lastOffset to suballoc.offset.
10748  ++unusedRangeCount;
10749  }
10750 
10751  // 2. Process this allocation.
10752  // There is allocation with suballoc.offset, suballoc.size.
10753  ++alloc2ndCount;
10754  usedBytes += suballoc.size;
10755 
10756  // 3. Prepare for next iteration.
10757  lastOffset = suballoc.offset + suballoc.size;
10758  --nextAlloc2ndIndex;
10759  }
10760  // We are at the end.
10761  else
10762  {
10763  if(lastOffset < size)
10764  {
10765  // There is free space from lastOffset to size.
10766  ++unusedRangeCount;
10767  }
10768 
10769  // End of loop.
10770  lastOffset = size;
10771  }
10772  }
10773  }
10774 
10775  const VkDeviceSize unusedBytes = size - usedBytes;
10776  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10777 
10778  // SECOND PASS
10779  lastOffset = 0;
10780 
10781  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10782  {
10783  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10784  size_t nextAlloc2ndIndex = 0;
10785  while(lastOffset < freeSpace2ndTo1stEnd)
10786  {
10787  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10788  while(nextAlloc2ndIndex < suballoc2ndCount &&
10789  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10790  {
10791  ++nextAlloc2ndIndex;
10792  }
10793 
10794  // Found non-null allocation.
10795  if(nextAlloc2ndIndex < suballoc2ndCount)
10796  {
10797  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10798 
10799  // 1. Process free space before this allocation.
10800  if(lastOffset < suballoc.offset)
10801  {
10802  // There is free space from lastOffset to suballoc.offset.
10803  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10804  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10805  }
10806 
10807  // 2. Process this allocation.
10808  // There is allocation with suballoc.offset, suballoc.size.
10809  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10810 
10811  // 3. Prepare for next iteration.
10812  lastOffset = suballoc.offset + suballoc.size;
10813  ++nextAlloc2ndIndex;
10814  }
10815  // We are at the end.
10816  else
10817  {
10818  if(lastOffset < freeSpace2ndTo1stEnd)
10819  {
10820  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10821  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10822  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10823  }
10824 
10825  // End of loop.
10826  lastOffset = freeSpace2ndTo1stEnd;
10827  }
10828  }
10829  }
10830 
10831  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10832  while(lastOffset < freeSpace1stTo2ndEnd)
10833  {
10834  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10835  while(nextAlloc1stIndex < suballoc1stCount &&
10836  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10837  {
10838  ++nextAlloc1stIndex;
10839  }
10840 
10841  // Found non-null allocation.
10842  if(nextAlloc1stIndex < suballoc1stCount)
10843  {
10844  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10845 
10846  // 1. Process free space before this allocation.
10847  if(lastOffset < suballoc.offset)
10848  {
10849  // There is free space from lastOffset to suballoc.offset.
10850  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10851  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10852  }
10853 
10854  // 2. Process this allocation.
10855  // There is allocation with suballoc.offset, suballoc.size.
10856  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10857 
10858  // 3. Prepare for next iteration.
10859  lastOffset = suballoc.offset + suballoc.size;
10860  ++nextAlloc1stIndex;
10861  }
10862  // We are at the end.
10863  else
10864  {
10865  if(lastOffset < freeSpace1stTo2ndEnd)
10866  {
10867  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10868  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10869  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10870  }
10871 
10872  // End of loop.
10873  lastOffset = freeSpace1stTo2ndEnd;
10874  }
10875  }
10876 
10877  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10878  {
10879  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10880  while(lastOffset < size)
10881  {
10882  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10883  while(nextAlloc2ndIndex != SIZE_MAX &&
10884  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10885  {
10886  --nextAlloc2ndIndex;
10887  }
10888 
10889  // Found non-null allocation.
10890  if(nextAlloc2ndIndex != SIZE_MAX)
10891  {
10892  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10893 
10894  // 1. Process free space before this allocation.
10895  if(lastOffset < suballoc.offset)
10896  {
10897  // There is free space from lastOffset to suballoc.offset.
10898  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10899  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10900  }
10901 
10902  // 2. Process this allocation.
10903  // There is allocation with suballoc.offset, suballoc.size.
10904  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10905 
10906  // 3. Prepare for next iteration.
10907  lastOffset = suballoc.offset + suballoc.size;
10908  --nextAlloc2ndIndex;
10909  }
10910  // We are at the end.
10911  else
10912  {
10913  if(lastOffset < size)
10914  {
10915  // There is free space from lastOffset to size.
10916  const VkDeviceSize unusedRangeSize = size - lastOffset;
10917  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10918  }
10919 
10920  // End of loop.
10921  lastOffset = size;
10922  }
10923  }
10924  }
10925 
10926  PrintDetailedMap_End(json);
10927 }
10928 #endif // #if VMA_STATS_STRING_ENABLED
10929 
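// Entry point of the linear algorithm. It dispatches to the lower-address path
// (end of the 1st vector, or ring-buffer wrap-around into the 2nd) or to the
// upper-address path (top of the double stack). A caller typically reaches the
// upper-address path by allocating from a linear pool with the upper-address
// flag; illustrative sketch only, myLinearPool is a hypothetical pool handle:
//
//   VmaAllocationCreateInfo allocCreateInfo = {};
//   allocCreateInfo.pool = myLinearPool; // created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
//   allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;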
10930 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10931  uint32_t currentFrameIndex,
10932  uint32_t frameInUseCount,
10933  VkDeviceSize bufferImageGranularity,
10934  VkDeviceSize allocSize,
10935  VkDeviceSize allocAlignment,
10936  bool upperAddress,
10937  VmaSuballocationType allocType,
10938  bool canMakeOtherLost,
10939  uint32_t strategy,
10940  VmaAllocationRequest* pAllocationRequest)
10941 {
10942  VMA_ASSERT(allocSize > 0);
10943  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10944  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10945  VMA_HEAVY_ASSERT(Validate());
10946  return upperAddress ?
10947  CreateAllocationRequest_UpperAddress(
10948  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10949  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10950  CreateAllocationRequest_LowerAddress(
10951  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10952  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10953 }
10954 
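// Upper-address path: the new allocation is placed just below the current top of
// the upper stack (suballocations2nd.back()), or below the end of the block when
// that stack is empty. The offset is moved down for VMA_DEBUG_MARGIN, alignment
// and bufferImageGranularity, then checked against the end of the 1st vector so
// the two stacks never overlap.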
10955 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10956  uint32_t currentFrameIndex,
10957  uint32_t frameInUseCount,
10958  VkDeviceSize bufferImageGranularity,
10959  VkDeviceSize allocSize,
10960  VkDeviceSize allocAlignment,
10961  VmaSuballocationType allocType,
10962  bool canMakeOtherLost,
10963  uint32_t strategy,
10964  VmaAllocationRequest* pAllocationRequest)
10965 {
10966  const VkDeviceSize size = GetSize();
10967  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10968  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10969 
10970  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10971  {
10972  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10973  return false;
10974  }
10975 
10976  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10977  if(allocSize > size)
10978  {
10979  return false;
10980  }
10981  VkDeviceSize resultBaseOffset = size - allocSize;
10982  if(!suballocations2nd.empty())
10983  {
10984  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10985  resultBaseOffset = lastSuballoc.offset - allocSize;
10986  if(allocSize > lastSuballoc.offset)
10987  {
10988  return false;
10989  }
10990  }
10991 
10992  // Start from offset equal to end of free space.
10993  VkDeviceSize resultOffset = resultBaseOffset;
10994 
10995  // Apply VMA_DEBUG_MARGIN at the end.
10996  if(VMA_DEBUG_MARGIN > 0)
10997  {
10998  if(resultOffset < VMA_DEBUG_MARGIN)
10999  {
11000  return false;
11001  }
11002  resultOffset -= VMA_DEBUG_MARGIN;
11003  }
11004 
11005  // Apply alignment.
11006  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
11007 
11008  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
11009  // Make bigger alignment if necessary.
11010  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
11011  {
11012  bool bufferImageGranularityConflict = false;
11013  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11014  {
11015  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11016  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11017  {
11018  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
11019  {
11020  bufferImageGranularityConflict = true;
11021  break;
11022  }
11023  }
11024  else
11025  // Already on previous page.
11026  break;
11027  }
11028  if(bufferImageGranularityConflict)
11029  {
11030  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
11031  }
11032  }
11033 
11034  // There is enough free space.
11035  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
11036  suballocations1st.back().offset + suballocations1st.back().size :
11037  0;
11038  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
11039  {
11040  // Check previous suballocations for BufferImageGranularity conflicts.
11041  // If conflict exists, allocation cannot be made here.
11042  if(bufferImageGranularity > 1)
11043  {
11044  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11045  {
11046  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11047  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11048  {
11049  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
11050  {
11051  return false;
11052  }
11053  }
11054  else
11055  {
11056  // Already on next page.
11057  break;
11058  }
11059  }
11060  }
11061 
11062  // All tests passed: Success.
11063  pAllocationRequest->offset = resultOffset;
11064  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
11065  pAllocationRequest->sumItemSize = 0;
11066  // pAllocationRequest->item unused.
11067  pAllocationRequest->itemsToMakeLostCount = 0;
11068  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
11069  return true;
11070  }
11071 
11072  return false;
11073 }
11074 
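// Lower-address path. Two placements are tried in order:
// 1. At the end of the 1st vector (2nd vector empty or used as upper stack),
//    bounded by suballocations2nd.back().offset or by the end of the block.
// 2. As a ring buffer: wrap around and append to the 2nd vector, with the first
//    live allocation of the 1st vector as the end of free space. When
//    canMakeOtherLost is set, colliding allocations old enough (last use more
//    than frameInUseCount frames ago) are counted as items to make lost.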
11075 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
11076  uint32_t currentFrameIndex,
11077  uint32_t frameInUseCount,
11078  VkDeviceSize bufferImageGranularity,
11079  VkDeviceSize allocSize,
11080  VkDeviceSize allocAlignment,
11081  VmaSuballocationType allocType,
11082  bool canMakeOtherLost,
11083  uint32_t strategy,
11084  VmaAllocationRequest* pAllocationRequest)
11085 {
11086  const VkDeviceSize size = GetSize();
11087  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11088  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11089 
11090  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11091  {
11092  // Try to allocate at the end of 1st vector.
11093 
11094  VkDeviceSize resultBaseOffset = 0;
11095  if(!suballocations1st.empty())
11096  {
11097  const VmaSuballocation& lastSuballoc = suballocations1st.back();
11098  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11099  }
11100 
11101  // Start from offset equal to beginning of free space.
11102  VkDeviceSize resultOffset = resultBaseOffset;
11103 
11104  // Apply VMA_DEBUG_MARGIN at the beginning.
11105  if(VMA_DEBUG_MARGIN > 0)
11106  {
11107  resultOffset += VMA_DEBUG_MARGIN;
11108  }
11109 
11110  // Apply alignment.
11111  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11112 
11113  // Check previous suballocations for BufferImageGranularity conflicts.
11114  // Make bigger alignment if necessary.
11115  if(bufferImageGranularity > 1 && !suballocations1st.empty())
11116  {
11117  bool bufferImageGranularityConflict = false;
11118  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11119  {
11120  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11121  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11122  {
11123  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11124  {
11125  bufferImageGranularityConflict = true;
11126  break;
11127  }
11128  }
11129  else
11130  // Already on previous page.
11131  break;
11132  }
11133  if(bufferImageGranularityConflict)
11134  {
11135  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11136  }
11137  }
11138 
11139  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
11140  suballocations2nd.back().offset : size;
11141 
11142  // There is enough free space at the end after alignment.
11143  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
11144  {
11145  // Check next suballocations for BufferImageGranularity conflicts.
11146  // If conflict exists, allocation cannot be made here.
11147  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11148  {
11149  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11150  {
11151  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11152  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11153  {
11154  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11155  {
11156  return false;
11157  }
11158  }
11159  else
11160  {
11161  // Already on previous page.
11162  break;
11163  }
11164  }
11165  }
11166 
11167  // All tests passed: Success.
11168  pAllocationRequest->offset = resultOffset;
11169  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
11170  pAllocationRequest->sumItemSize = 0;
11171  // pAllocationRequest->item, customData unused.
11172  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
11173  pAllocationRequest->itemsToMakeLostCount = 0;
11174  return true;
11175  }
11176  }
11177 
11178  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
11179  // beginning of 1st vector as the end of free space.
11180  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11181  {
11182  VMA_ASSERT(!suballocations1st.empty());
11183 
11184  VkDeviceSize resultBaseOffset = 0;
11185  if(!suballocations2nd.empty())
11186  {
11187  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11188  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11189  }
11190 
11191  // Start from offset equal to beginning of free space.
11192  VkDeviceSize resultOffset = resultBaseOffset;
11193 
11194  // Apply VMA_DEBUG_MARGIN at the beginning.
11195  if(VMA_DEBUG_MARGIN > 0)
11196  {
11197  resultOffset += VMA_DEBUG_MARGIN;
11198  }
11199 
11200  // Apply alignment.
11201  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11202 
11203  // Check previous suballocations for BufferImageGranularity conflicts.
11204  // Make bigger alignment if necessary.
11205  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
11206  {
11207  bool bufferImageGranularityConflict = false;
11208  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
11209  {
11210  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
11211  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11212  {
11213  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11214  {
11215  bufferImageGranularityConflict = true;
11216  break;
11217  }
11218  }
11219  else
11220  // Already on previous page.
11221  break;
11222  }
11223  if(bufferImageGranularityConflict)
11224  {
11225  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11226  }
11227  }
11228 
11229  pAllocationRequest->itemsToMakeLostCount = 0;
11230  pAllocationRequest->sumItemSize = 0;
11231  size_t index1st = m_1stNullItemsBeginCount;
11232 
11233  if(canMakeOtherLost)
11234  {
11235  while(index1st < suballocations1st.size() &&
11236  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
11237  {
11238  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
11239  const VmaSuballocation& suballoc = suballocations1st[index1st];
11240  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11241  {
11242  // No problem.
11243  }
11244  else
11245  {
11246  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11247  if(suballoc.hAllocation->CanBecomeLost() &&
11248  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11249  {
11250  ++pAllocationRequest->itemsToMakeLostCount;
11251  pAllocationRequest->sumItemSize += suballoc.size;
11252  }
11253  else
11254  {
11255  return false;
11256  }
11257  }
11258  ++index1st;
11259  }
11260 
11261  // Check next suballocations for BufferImageGranularity conflicts.
11262  // If conflict exists, we must mark more allocations lost or fail.
11263  if(bufferImageGranularity > 1)
11264  {
11265  while(index1st < suballocations1st.size())
11266  {
11267  const VmaSuballocation& suballoc = suballocations1st[index1st];
11268  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11269  {
11270  if(suballoc.hAllocation != VK_NULL_HANDLE)
11271  {
11272  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11273  if(suballoc.hAllocation->CanBecomeLost() &&
11274  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11275  {
11276  ++pAllocationRequest->itemsToMakeLostCount;
11277  pAllocationRequest->sumItemSize += suballoc.size;
11278  }
11279  else
11280  {
11281  return false;
11282  }
11283  }
11284  }
11285  else
11286  {
11287  // Already on next page.
11288  break;
11289  }
11290  ++index1st;
11291  }
11292  }
11293 
11294  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
11295  if(index1st == suballocations1st.size() &&
11296  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11297  {
11298  // TODO: Known limitation: this special case is not implemented yet, so the allocation fails.
11299  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11300  }
11301  }
11302 
11303  // There is enough free space at the end after alignment.
11304  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11305  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11306  {
11307  // Check next suballocations for BufferImageGranularity conflicts.
11308  // If conflict exists, allocation cannot be made here.
11309  if(bufferImageGranularity > 1)
11310  {
11311  for(size_t nextSuballocIndex = index1st;
11312  nextSuballocIndex < suballocations1st.size();
11313  nextSuballocIndex++)
11314  {
11315  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11316  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11317  {
11318  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11319  {
11320  return false;
11321  }
11322  }
11323  else
11324  {
11325  // Already on next page.
11326  break;
11327  }
11328  }
11329  }
11330 
11331  // All tests passed: Success.
11332  pAllocationRequest->offset = resultOffset;
11333  pAllocationRequest->sumFreeSize =
11334  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11335  - resultBaseOffset
11336  - pAllocationRequest->sumItemSize;
11337  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11338  // pAllocationRequest->item, customData unused.
11339  return true;
11340  }
11341  }
11342 
11343  return false;
11344 }
11345 
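// Makes the allocations counted in pAllocationRequest->itemsToMakeLostCount
// actually lost. The scan starts at the first live item of the 1st vector and,
// in ring-buffer mode, wraps into the 2nd vector. Every item made lost becomes a
// null item, and CleanupAfterFree() compacts the vectors afterwards.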
11346 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11347  uint32_t currentFrameIndex,
11348  uint32_t frameInUseCount,
11349  VmaAllocationRequest* pAllocationRequest)
11350 {
11351  if(pAllocationRequest->itemsToMakeLostCount == 0)
11352  {
11353  return true;
11354  }
11355 
11356  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11357 
11358  // We always start from 1st.
11359  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11360  size_t index = m_1stNullItemsBeginCount;
11361  size_t madeLostCount = 0;
11362  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11363  {
11364  if(index == suballocations->size())
11365  {
11366  index = 0;
11367  // If we reach the end of the 1st vector, wrap around to the beginning of the 2nd.
11368  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11369  {
11370  suballocations = &AccessSuballocations2nd();
11371  }
11372  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11373  // suballocations continues pointing at AccessSuballocations1st().
11374  VMA_ASSERT(!suballocations->empty());
11375  }
11376  VmaSuballocation& suballoc = (*suballocations)[index];
11377  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11378  {
11379  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11380  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11381  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11382  {
11383  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11384  suballoc.hAllocation = VK_NULL_HANDLE;
11385  m_SumFreeSize += suballoc.size;
11386  if(suballocations == &AccessSuballocations1st())
11387  {
11388  ++m_1stNullItemsMiddleCount;
11389  }
11390  else
11391  {
11392  ++m_2ndNullItemsCount;
11393  }
11394  ++madeLostCount;
11395  }
11396  else
11397  {
11398  return false;
11399  }
11400  }
11401  ++index;
11402  }
11403 
11404  CleanupAfterFree();
11405  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11406 
11407  return true;
11408 }
11409 
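// Unconditional variant of the above: tries to make every eligible allocation in
// both vectors lost and returns how many were actually lost.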
11410 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11411 {
11412  uint32_t lostAllocationCount = 0;
11413 
11414  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11415  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11416  {
11417  VmaSuballocation& suballoc = suballocations1st[i];
11418  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11419  suballoc.hAllocation->CanBecomeLost() &&
11420  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11421  {
11422  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11423  suballoc.hAllocation = VK_NULL_HANDLE;
11424  ++m_1stNullItemsMiddleCount;
11425  m_SumFreeSize += suballoc.size;
11426  ++lostAllocationCount;
11427  }
11428  }
11429 
11430  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11431  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11432  {
11433  VmaSuballocation& suballoc = suballocations2nd[i];
11434  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11435  suballoc.hAllocation->CanBecomeLost() &&
11436  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11437  {
11438  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11439  suballoc.hAllocation = VK_NULL_HANDLE;
11440  ++m_2ndNullItemsCount;
11441  m_SumFreeSize += suballoc.size;
11442  ++lostAllocationCount;
11443  }
11444  }
11445 
11446  if(lostAllocationCount)
11447  {
11448  CleanupAfterFree();
11449  }
11450 
11451  return lostAllocationCount;
11452 }
11453 
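// Corruption check for the margins around allocations: with VMA_DEBUG_MARGIN > 0
// and corruption detection enabled, a magic value is written before and after
// each allocation; VmaValidateMagicValue re-reads those bytes from the mapped
// block data and any mismatch is reported as VK_ERROR_VALIDATION_FAILED_EXT.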
11454 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11455 {
11456  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11457  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11458  {
11459  const VmaSuballocation& suballoc = suballocations1st[i];
11460  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11461  {
11462  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11463  {
11464  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11465  return VK_ERROR_VALIDATION_FAILED_EXT;
11466  }
11467  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11468  {
11469  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11470  return VK_ERROR_VALIDATION_FAILED_EXT;
11471  }
11472  }
11473  }
11474 
11475  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11476  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11477  {
11478  const VmaSuballocation& suballoc = suballocations2nd[i];
11479  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11480  {
11481  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11482  {
11483  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11484  return VK_ERROR_VALIDATION_FAILED_EXT;
11485  }
11486  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11487  {
11488  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11489  return VK_ERROR_VALIDATION_FAILED_EXT;
11490  }
11491  }
11492  }
11493 
11494  return VK_SUCCESS;
11495 }
11496 
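// Commits a successful allocation request. The request type selects the vector:
// UpperAddress pushes onto the 2nd vector and switches it to double-stack mode,
// EndOf1st appends to the 1st vector, and EndOf2nd appends to the 2nd vector,
// starting ring-buffer mode on its first use.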
11497 void VmaBlockMetadata_Linear::Alloc(
11498  const VmaAllocationRequest& request,
11499  VmaSuballocationType type,
11500  VkDeviceSize allocSize,
11501  VmaAllocation hAllocation)
11502 {
11503  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11504 
11505  switch(request.type)
11506  {
11507  case VmaAllocationRequestType::UpperAddress:
11508  {
11509  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11510  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11511  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11512  suballocations2nd.push_back(newSuballoc);
11513  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11514  }
11515  break;
11516  case VmaAllocationRequestType::EndOf1st:
11517  {
11518  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11519 
11520  VMA_ASSERT(suballocations1st.empty() ||
11521  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11522  // Check if it fits before the end of the block.
11523  VMA_ASSERT(request.offset + allocSize <= GetSize());
11524 
11525  suballocations1st.push_back(newSuballoc);
11526  }
11527  break;
11528  case VmaAllocationRequestType::EndOf2nd:
11529  {
11530  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11531  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11532  VMA_ASSERT(!suballocations1st.empty() &&
11533  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11534  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11535 
11536  switch(m_2ndVectorMode)
11537  {
11538  case SECOND_VECTOR_EMPTY:
11539  // First allocation from second part ring buffer.
11540  VMA_ASSERT(suballocations2nd.empty());
11541  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11542  break;
11543  case SECOND_VECTOR_RING_BUFFER:
11544  // 2-part ring buffer is already started.
11545  VMA_ASSERT(!suballocations2nd.empty());
11546  break;
11547  case SECOND_VECTOR_DOUBLE_STACK:
11548  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11549  break;
11550  default:
11551  VMA_ASSERT(0);
11552  }
11553 
11554  suballocations2nd.push_back(newSuballoc);
11555  }
11556  break;
11557  default:
11558  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11559  }
11560 
11561  m_SumFreeSize -= newSuballoc.size;
11562 }
11563 
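// Freeing is O(1) in the most common cases: the oldest allocation (first live
// item of the 1st vector) and the newest one (back of the 2nd vector, or back of
// the 1st when the 2nd is unused). An item freed from the middle is located by
// binary search over offsets, marked as a null item, and physically removed
// later by CleanupAfterFree().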
11564 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11565 {
11566  FreeAtOffset(allocation->GetOffset());
11567 }
11568 
11569 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11570 {
11571  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11572  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11573 
11574  if(!suballocations1st.empty())
11575  {
11576  // First allocation: Mark it as next empty at the beginning.
11577  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11578  if(firstSuballoc.offset == offset)
11579  {
11580  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11581  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11582  m_SumFreeSize += firstSuballoc.size;
11583  ++m_1stNullItemsBeginCount;
11584  CleanupAfterFree();
11585  return;
11586  }
11587  }
11588 
11589  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11590  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11591  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11592  {
11593  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11594  if(lastSuballoc.offset == offset)
11595  {
11596  m_SumFreeSize += lastSuballoc.size;
11597  suballocations2nd.pop_back();
11598  CleanupAfterFree();
11599  return;
11600  }
11601  }
11602  // Last allocation in 1st vector.
11603  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11604  {
11605  VmaSuballocation& lastSuballoc = suballocations1st.back();
11606  if(lastSuballoc.offset == offset)
11607  {
11608  m_SumFreeSize += lastSuballoc.size;
11609  suballocations1st.pop_back();
11610  CleanupAfterFree();
11611  return;
11612  }
11613  }
11614 
11615  // Item from the middle of 1st vector.
11616  {
11617  VmaSuballocation refSuballoc;
11618  refSuballoc.offset = offset;
11619  // Rest of members stays uninitialized intentionally for better performance.
11620  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11621  suballocations1st.begin() + m_1stNullItemsBeginCount,
11622  suballocations1st.end(),
11623  refSuballoc,
11624  VmaSuballocationOffsetLess());
11625  if(it != suballocations1st.end())
11626  {
11627  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11628  it->hAllocation = VK_NULL_HANDLE;
11629  ++m_1stNullItemsMiddleCount;
11630  m_SumFreeSize += it->size;
11631  CleanupAfterFree();
11632  return;
11633  }
11634  }
11635 
11636  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11637  {
11638  // Item from the middle of 2nd vector.
11639  VmaSuballocation refSuballoc;
11640  refSuballoc.offset = offset;
11641  // Rest of members stays uninitialized intentionally for better performance.
11642  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11643  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11644  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11645  if(it != suballocations2nd.end())
11646  {
11647  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11648  it->hAllocation = VK_NULL_HANDLE;
11649  ++m_2ndNullItemsCount;
11650  m_SumFreeSize += it->size;
11651  CleanupAfterFree();
11652  return;
11653  }
11654  }
11655 
11656  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11657 }
11658 
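// Compaction heuristic: with more than 32 items in the 1st vector, compact once
// null items reach at least 1.5x the live items, since
// nullItemCount * 2 >= liveItemCount * 3 is equivalent to
// nullItemCount >= 1.5 * liveItemCount.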
11659 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11660 {
11661  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11662  const size_t suballocCount = AccessSuballocations1st().size();
11663  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11664 }
11665 
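// Housekeeping after every free: trims null items from both ends of both
// vectors, compacts the 1st vector when ShouldCompact1st() says so, resets the
// 2nd vector mode when that vector empties, and when the 1st vector empties in
// ring-buffer mode, swaps the two vectors so the 2nd becomes the new 1st.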
11666 void VmaBlockMetadata_Linear::CleanupAfterFree()
11667 {
11668  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11669  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11670 
11671  if(IsEmpty())
11672  {
11673  suballocations1st.clear();
11674  suballocations2nd.clear();
11675  m_1stNullItemsBeginCount = 0;
11676  m_1stNullItemsMiddleCount = 0;
11677  m_2ndNullItemsCount = 0;
11678  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11679  }
11680  else
11681  {
11682  const size_t suballoc1stCount = suballocations1st.size();
11683  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11684  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11685 
11686  // Find more null items at the beginning of 1st vector.
11687  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11688  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11689  {
11690  ++m_1stNullItemsBeginCount;
11691  --m_1stNullItemsMiddleCount;
11692  }
11693 
11694  // Find more null items at the end of 1st vector.
11695  while(m_1stNullItemsMiddleCount > 0 &&
11696  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11697  {
11698  --m_1stNullItemsMiddleCount;
11699  suballocations1st.pop_back();
11700  }
11701 
11702  // Find more null items at the end of 2nd vector.
11703  while(m_2ndNullItemsCount > 0 &&
11704  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11705  {
11706  --m_2ndNullItemsCount;
11707  suballocations2nd.pop_back();
11708  }
11709 
11710  // Find more null items at the beginning of 2nd vector.
11711  while(m_2ndNullItemsCount > 0 &&
11712  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11713  {
11714  --m_2ndNullItemsCount;
11715  VmaVectorRemove(suballocations2nd, 0);
11716  }
11717 
11718  if(ShouldCompact1st())
11719  {
11720  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11721  size_t srcIndex = m_1stNullItemsBeginCount;
11722  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11723  {
11724  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11725  {
11726  ++srcIndex;
11727  }
11728  if(dstIndex != srcIndex)
11729  {
11730  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11731  }
11732  ++srcIndex;
11733  }
11734  suballocations1st.resize(nonNullItemCount);
11735  m_1stNullItemsBeginCount = 0;
11736  m_1stNullItemsMiddleCount = 0;
11737  }
11738 
11739  // 2nd vector became empty.
11740  if(suballocations2nd.empty())
11741  {
11742  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11743  }
11744 
11745  // 1st vector became empty.
11746  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11747  {
11748  suballocations1st.clear();
11749  m_1stNullItemsBeginCount = 0;
11750 
11751  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11752  {
11753  // Swap 1st with 2nd. Now 2nd is empty.
11754  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11755  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11756  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11757  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11758  {
11759  ++m_1stNullItemsBeginCount;
11760  --m_1stNullItemsMiddleCount;
11761  }
11762  m_2ndNullItemsCount = 0;
11763  m_1stVectorIndex ^= 1;
11764  }
11765  }
11766  }
11767 
11768  VMA_HEAVY_ASSERT(Validate());
11769 }
11770 
11771 
11772 ////////////////////////////////////////////////////////////////////////////////
11773 // class VmaBlockMetadata_Buddy
11774 
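// The buddy algorithm views the block as a binary tree of power-of-two nodes:
// level 0 spans the whole usable size (the block size rounded down to a power of
// two, see Init), and each deeper level halves the node size. Free nodes of each
// level are kept in a doubly linked list (m_FreeList). Allocation takes a free
// node of the smallest sufficient level, splitting larger nodes on the way down;
// freeing merges a node with its free buddy back into the parent.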
11775 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11776  VmaBlockMetadata(hAllocator),
11777  m_Root(VMA_NULL),
11778  m_AllocationCount(0),
11779  m_FreeCount(1),
11780  m_SumFreeSize(0)
11781 {
11782  memset(m_FreeList, 0, sizeof(m_FreeList));
11783 }
11784 
11785 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11786 {
11787  DeleteNode(m_Root);
11788 }
11789 
11790 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11791 {
11792  VmaBlockMetadata::Init(size);
11793 
11794  m_UsableSize = VmaPrevPow2(size);
11795  m_SumFreeSize = m_UsableSize;
11796 
11797  // Calculate m_LevelCount.
11798  m_LevelCount = 1;
11799  while(m_LevelCount < MAX_LEVELS &&
11800  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11801  {
11802  ++m_LevelCount;
11803  }
11804 
11805  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11806  rootNode->offset = 0;
11807  rootNode->type = Node::TYPE_FREE;
11808  rootNode->parent = VMA_NULL;
11809  rootNode->buddy = VMA_NULL;
11810 
11811  m_Root = rootNode;
11812  AddToFreeListFront(0, rootNode);
11813 }
11814 
11815 bool VmaBlockMetadata_Buddy::Validate() const
11816 {
11817  // Validate tree.
11818  ValidationContext ctx;
11819  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11820  {
11821  VMA_VALIDATE(false && "ValidateNode failed.");
11822  }
11823  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11824  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11825 
11826  // Validate free node lists.
11827  for(uint32_t level = 0; level < m_LevelCount; ++level)
11828  {
11829  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11830  m_FreeList[level].front->free.prev == VMA_NULL);
11831 
11832  for(Node* node = m_FreeList[level].front;
11833  node != VMA_NULL;
11834  node = node->free.next)
11835  {
11836  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11837 
11838  if(node->free.next == VMA_NULL)
11839  {
11840  VMA_VALIDATE(m_FreeList[level].back == node);
11841  }
11842  else
11843  {
11844  VMA_VALIDATE(node->free.next->free.prev == node);
11845  }
11846  }
11847  }
11848 
11849  // Validate that free lists at higher levels are empty.
11850  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11851  {
11852  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11853  }
11854 
11855  return true;
11856 }
11857 
11858 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11859 {
11860  for(uint32_t level = 0; level < m_LevelCount; ++level)
11861  {
11862  if(m_FreeList[level].front != VMA_NULL)
11863  {
11864  return LevelToNodeSize(level);
11865  }
11866  }
11867  return 0;
11868 }
11869 
11870 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11871 {
11872  const VkDeviceSize unusableSize = GetUnusableSize();
11873 
11874  outInfo.blockCount = 1;
11875 
11876  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11877  outInfo.usedBytes = outInfo.unusedBytes = 0;
11878 
11879  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11880  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11881  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11882 
11883  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11884 
11885  if(unusableSize > 0)
11886  {
11887  ++outInfo.unusedRangeCount;
11888  outInfo.unusedBytes += unusableSize;
11889  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11890  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11891  }
11892 }
11893 
11894 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11895 {
11896  const VkDeviceSize unusableSize = GetUnusableSize();
11897 
11898  inoutStats.size += GetSize();
11899  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11900  inoutStats.allocationCount += m_AllocationCount;
11901  inoutStats.unusedRangeCount += m_FreeCount;
11902  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11903 
11904  if(unusableSize > 0)
11905  {
11906  ++inoutStats.unusedRangeCount;
11907  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11908  }
11909 }
11910 
11911 #if VMA_STATS_STRING_ENABLED
11912 
11913 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11914 {
11915  // TODO optimize
11916  VmaStatInfo stat;
11917  CalcAllocationStatInfo(stat);
11918 
11919  PrintDetailedMap_Begin(
11920  json,
11921  stat.unusedBytes,
11922  stat.allocationCount,
11923  stat.unusedRangeCount);
11924 
11925  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11926 
11927  const VkDeviceSize unusableSize = GetUnusableSize();
11928  if(unusableSize > 0)
11929  {
11930  PrintDetailedMap_UnusedRange(json,
11931  m_UsableSize, // offset
11932  unusableSize); // size
11933  }
11934 
11935  PrintDetailedMap_End(json);
11936 }
11937 
11938 #endif // #if VMA_STATS_STRING_ENABLED
11939 
11940 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11941  uint32_t currentFrameIndex,
11942  uint32_t frameInUseCount,
11943  VkDeviceSize bufferImageGranularity,
11944  VkDeviceSize allocSize,
11945  VkDeviceSize allocAlignment,
11946  bool upperAddress,
11947  VmaSuballocationType allocType,
11948  bool canMakeOtherLost,
11949  uint32_t strategy,
11950  VmaAllocationRequest* pAllocationRequest)
11951 {
11952  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11953 
11954  // Simple way to respect bufferImageGranularity. May be optimized some day.
11955  // Whenever it might be an OPTIMAL image...
11956  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11957  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11958  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11959  {
11960  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11961  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11962  }
11963 
11964  if(allocSize > m_UsableSize)
11965  {
11966  return false;
11967  }
11968 
11969  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11970  for(uint32_t level = targetLevel + 1; level--; )
11971  {
11972  for(Node* freeNode = m_FreeList[level].front;
11973  freeNode != VMA_NULL;
11974  freeNode = freeNode->free.next)
11975  {
11976  if(freeNode->offset % allocAlignment == 0)
11977  {
11978  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11979  pAllocationRequest->offset = freeNode->offset;
11980  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11981  pAllocationRequest->sumItemSize = 0;
11982  pAllocationRequest->itemsToMakeLostCount = 0;
11983  pAllocationRequest->customData = (void*)(uintptr_t)level;
11984  return true;
11985  }
11986  }
11987  }
11988 
11989  return false;
11990 }
11991 
11992 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11993  uint32_t currentFrameIndex,
11994  uint32_t frameInUseCount,
11995  VmaAllocationRequest* pAllocationRequest)
11996 {
11997  /*
11998  Lost allocations are not supported in buddy allocator at the moment.
11999  Support might be added in the future.
12000  */
12001  return pAllocationRequest->itemsToMakeLostCount == 0;
12002 }
12003 
12004 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
12005 {
12006  /*
12007  Lost allocations are not supported in buddy allocator at the moment.
12008  Support might be added in the future.
12009  */
12010  return 0;
12011 }
12012 
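// Alloc descends from the level recorded in request.customData down to the
// target level, splitting one free node into two buddies per step. For example
// (sizes illustrative), a free 64 KB node serving a 16 KB request is split into
// two 32 KB nodes, and the left one into two 16 KB nodes. The left child keeps
// the parent's offset, so the alignment verified by CreateAllocationRequest is
// preserved.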
12013 void VmaBlockMetadata_Buddy::Alloc(
12014  const VmaAllocationRequest& request,
12015  VmaSuballocationType type,
12016  VkDeviceSize allocSize,
12017  VmaAllocation hAllocation)
12018 {
12019  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
12020 
12021  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12022  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
12023 
12024  Node* currNode = m_FreeList[currLevel].front;
12025  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12026  while(currNode->offset != request.offset)
12027  {
12028  currNode = currNode->free.next;
12029  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12030  }
12031 
12032  // Go down, splitting free nodes.
12033  while(currLevel < targetLevel)
12034  {
12035  // currNode is already first free node at currLevel.
12036  // Remove it from list of free nodes at this currLevel.
12037  RemoveFromFreeList(currLevel, currNode);
12038 
12039  const uint32_t childrenLevel = currLevel + 1;
12040 
12041  // Create two free sub-nodes.
12042  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
12043  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
12044 
12045  leftChild->offset = currNode->offset;
12046  leftChild->type = Node::TYPE_FREE;
12047  leftChild->parent = currNode;
12048  leftChild->buddy = rightChild;
12049 
12050  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
12051  rightChild->type = Node::TYPE_FREE;
12052  rightChild->parent = currNode;
12053  rightChild->buddy = leftChild;
12054 
12055  // Convert current currNode to split type.
12056  currNode->type = Node::TYPE_SPLIT;
12057  currNode->split.leftChild = leftChild;
12058 
12059  // Add child nodes to free list. Order is important!
12060  AddToFreeListFront(childrenLevel, rightChild);
12061  AddToFreeListFront(childrenLevel, leftChild);
12062 
12063  ++m_FreeCount;
12064  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
12065  ++currLevel;
12066  currNode = m_FreeList[currLevel].front;
12067 
12068  /*
12069  We can be sure that currNode, as the left child of the node previously split,
12070  also fulfills the alignment requirement.
12071  */
12072  }
12073 
12074  // Remove from free list.
12075  VMA_ASSERT(currLevel == targetLevel &&
12076  currNode != VMA_NULL &&
12077  currNode->type == Node::TYPE_FREE);
12078  RemoveFromFreeList(currLevel, currNode);
12079 
12080  // Convert to allocation node.
12081  currNode->type = Node::TYPE_ALLOCATION;
12082  currNode->allocation.alloc = hAllocation;
12083 
12084  ++m_AllocationCount;
12085  --m_FreeCount;
12086  m_SumFreeSize -= allocSize;
12087 }
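// A minimal standalone sketch (not part of the library) of the split step
// above: each split yields a left child at the parent's offset and a right
// child (its buddy) half a node further, which is why descending through
// left children, as Alloc() does, preserves any alignment the parent had.
#include <cstdint>
#include <cstdio>

int main()
{
    uint64_t offset = 0;     // offset of the node being split
    uint64_t nodeSize = 256; // node size at the current level
    while(nodeSize > 32)     // split down to the level with 32-byte nodes
    {
        nodeSize /= 2;
        printf("split: left child @%llu, right child (buddy) @%llu, size %llu\n",
            (unsigned long long)offset,
            (unsigned long long)(offset + nodeSize),
            (unsigned long long)nodeSize);
        // offset stays the same: we keep descending into the left child.
    }
    return 0;
}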
12088 
12089 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
12090 {
12091  if(node->type == Node::TYPE_SPLIT)
12092  {
12093  DeleteNode(node->split.leftChild->buddy);
12094  DeleteNode(node->split.leftChild);
12095  }
12096 
12097  vma_delete(GetAllocationCallbacks(), node);
12098 }
12099 
12100 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
12101 {
12102  VMA_VALIDATE(level < m_LevelCount);
12103  VMA_VALIDATE(curr->parent == parent);
12104  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
12105  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
12106  switch(curr->type)
12107  {
12108  case Node::TYPE_FREE:
12109  // curr->free.prev, next are validated separately.
12110  ctx.calculatedSumFreeSize += levelNodeSize;
12111  ++ctx.calculatedFreeCount;
12112  break;
12113  case Node::TYPE_ALLOCATION:
12114  ++ctx.calculatedAllocationCount;
12115  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
12116  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
12117  break;
12118  case Node::TYPE_SPLIT:
12119  {
12120  const uint32_t childrenLevel = level + 1;
12121  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
12122  const Node* const leftChild = curr->split.leftChild;
12123  VMA_VALIDATE(leftChild != VMA_NULL);
12124  VMA_VALIDATE(leftChild->offset == curr->offset);
12125  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
12126  {
12127  VMA_VALIDATE(false && "ValidateNode for left child failed.");
12128  }
12129  const Node* const rightChild = leftChild->buddy;
12130  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
12131  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
12132  {
12133  VMA_VALIDATE(false && "ValidateNode for right child failed.");
12134  }
12135  }
12136  break;
12137  default:
12138  return false;
12139  }
12140 
12141  return true;
12142 }
12143 
12144 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
12145 {
12146  // I know this could be optimized somehow, e.g. by using std::log2p1 from C++20.
12147  uint32_t level = 0;
12148  VkDeviceSize currLevelNodeSize = m_UsableSize;
12149  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
12150  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
12151  {
12152  ++level;
12153  currLevelNodeSize = nextLevelNodeSize;
12154  nextLevelNodeSize = currLevelNodeSize >> 1;
12155  }
12156  return level;
12157 }
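// A standalone sketch (not part of the library) of the same level
// computation: descend one level per halving while the allocation still
// fits in the smaller node. usableSize and levelCount are hypothetical
// stand-ins for m_UsableSize and m_LevelCount.
#include <cstdint>

static uint32_t ExampleAllocSizeToLevel(uint64_t allocSize, uint64_t usableSize, uint32_t levelCount)
{
    uint32_t level = 0;
    uint64_t nextLevelNodeSize = usableSize >> 1;
    while(allocSize <= nextLevelNodeSize && level + 1 < levelCount)
    {
        ++level;
        nextLevelNodeSize >>= 1;
    }
    return level;
}
// E.g. with usableSize = 256 and levelCount = 6, allocSize = 40 yields level 2
// (node size 64): 40 <= 128 -> level 1, 40 <= 64 -> level 2, 40 > 32 -> stop.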
12158 
12159 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
12160 {
12161  // Find node and level.
12162  Node* node = m_Root;
12163  VkDeviceSize nodeOffset = 0;
12164  uint32_t level = 0;
12165  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
12166  while(node->type == Node::TYPE_SPLIT)
12167  {
12168  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
12169  if(offset < nodeOffset + nextLevelSize)
12170  {
12171  node = node->split.leftChild;
12172  }
12173  else
12174  {
12175  node = node->split.leftChild->buddy;
12176  nodeOffset += nextLevelSize;
12177  }
12178  ++level;
12179  levelNodeSize = nextLevelSize;
12180  }
12181 
12182  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
12183  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
12184 
12185  ++m_FreeCount;
12186  --m_AllocationCount;
12187  m_SumFreeSize += alloc->GetSize();
12188 
12189  node->type = Node::TYPE_FREE;
12190 
12191  // Join free nodes if possible.
12192  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
12193  {
12194  RemoveFromFreeList(level, node->buddy);
12195  Node* const parent = node->parent;
12196 
12197  vma_delete(GetAllocationCallbacks(), node->buddy);
12198  vma_delete(GetAllocationCallbacks(), node);
12199  parent->type = Node::TYPE_FREE;
12200 
12201  node = parent;
12202  --level;
12203  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
12204  --m_FreeCount;
12205  }
12206 
12207  AddToFreeListFront(level, node);
12208 }
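// A standalone sketch (not part of the library) of the descend-by-offset
// walk above: at every split node the freed offset lies either in the left
// or the right half, so finding the allocation node costs O(levelCount).
#include <cstdint>

static uint32_t ExampleDescendByOffset(uint64_t offset, uint64_t rootSize, uint64_t targetNodeSize)
{
    uint32_t level = 0;
    uint64_t nodeOffset = 0;
    uint64_t nodeSize = rootSize;
    while(nodeSize > targetNodeSize)
    {
        nodeSize >>= 1;
        if(offset >= nodeOffset + nodeSize)
            nodeOffset += nodeSize; // descend into the right child (the buddy)
        // else: descend into the left child; nodeOffset is unchanged
        ++level;
    }
    return level;
}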
12209 
12210 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
12211 {
12212  switch(node->type)
12213  {
12214  case Node::TYPE_FREE:
12215  ++outInfo.unusedRangeCount;
12216  outInfo.unusedBytes += levelNodeSize;
12217  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
12218  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
12219  break;
12220  case Node::TYPE_ALLOCATION:
12221  {
12222  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12223  ++outInfo.allocationCount;
12224  outInfo.usedBytes += allocSize;
12225  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
12226  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
12227 
12228  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
12229  if(unusedRangeSize > 0)
12230  {
12231  ++outInfo.unusedRangeCount;
12232  outInfo.unusedBytes += unusedRangeSize;
12233  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
12234  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
12235  }
12236  }
12237  break;
12238  case Node::TYPE_SPLIT:
12239  {
12240  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12241  const Node* const leftChild = node->split.leftChild;
12242  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12243  const Node* const rightChild = leftChild->buddy;
12244  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12245  }
12246  break;
12247  default:
12248  VMA_ASSERT(0);
12249  }
12250 }
12251 
12252 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12253 {
12254  VMA_ASSERT(node->type == Node::TYPE_FREE);
12255 
12256  // List is empty.
12257  Node* const frontNode = m_FreeList[level].front;
12258  if(frontNode == VMA_NULL)
12259  {
12260  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12261  node->free.prev = node->free.next = VMA_NULL;
12262  m_FreeList[level].front = m_FreeList[level].back = node;
12263  }
12264  else
12265  {
12266  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12267  node->free.prev = VMA_NULL;
12268  node->free.next = frontNode;
12269  frontNode->free.prev = node;
12270  m_FreeList[level].front = node;
12271  }
12272 }
12273 
12274 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12275 {
12276  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12277 
12278  // It is at the front.
12279  if(node->free.prev == VMA_NULL)
12280  {
12281  VMA_ASSERT(m_FreeList[level].front == node);
12282  m_FreeList[level].front = node->free.next;
12283  }
12284  else
12285  {
12286  Node* const prevFreeNode = node->free.prev;
12287  VMA_ASSERT(prevFreeNode->free.next == node);
12288  prevFreeNode->free.next = node->free.next;
12289  }
12290 
12291  // It is at the back.
12292  if(node->free.next == VMA_NULL)
12293  {
12294  VMA_ASSERT(m_FreeList[level].back == node);
12295  m_FreeList[level].back = node->free.prev;
12296  }
12297  else
12298  {
12299  Node* const nextFreeNode = node->free.next;
12300  VMA_ASSERT(nextFreeNode->free.prev == node);
12301  nextFreeNode->free.prev = node->free.prev;
12302  }
12303 }
12304 
12305 #if VMA_STATS_STRING_ENABLED
12306 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12307 {
12308  switch(node->type)
12309  {
12310  case Node::TYPE_FREE:
12311  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12312  break;
12313  case Node::TYPE_ALLOCATION:
12314  {
12315  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12316  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12317  if(allocSize < levelNodeSize)
12318  {
12319  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12320  }
12321  }
12322  break;
12323  case Node::TYPE_SPLIT:
12324  {
12325  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12326  const Node* const leftChild = node->split.leftChild;
12327  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12328  const Node* const rightChild = leftChild->buddy;
12329  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12330  }
12331  break;
12332  default:
12333  VMA_ASSERT(0);
12334  }
12335 }
12336 #endif // #if VMA_STATS_STRING_ENABLED
12337 
12338 
12339 ////////////////////////////////////////////////////////////////////////////////
12340 // class VmaDeviceMemoryBlock
12341 
12342 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12343  m_pMetadata(VMA_NULL),
12344  m_MemoryTypeIndex(UINT32_MAX),
12345  m_Id(0),
12346  m_hMemory(VK_NULL_HANDLE),
12347  m_MapCount(0),
12348  m_pMappedData(VMA_NULL)
12349 {
12350 }
12351 
12352 void VmaDeviceMemoryBlock::Init(
12353  VmaAllocator hAllocator,
12354  VmaPool hParentPool,
12355  uint32_t newMemoryTypeIndex,
12356  VkDeviceMemory newMemory,
12357  VkDeviceSize newSize,
12358  uint32_t id,
12359  uint32_t algorithm)
12360 {
12361  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12362 
12363  m_hParentPool = hParentPool;
12364  m_MemoryTypeIndex = newMemoryTypeIndex;
12365  m_Id = id;
12366  m_hMemory = newMemory;
12367 
12368  switch(algorithm)
12369  {
12370  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
12371  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
12372  break;
12373  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
12374  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
12375  break;
12376  default:
12377  VMA_ASSERT(0);
12378  // Fall-through.
12379  case 0:
12380  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12381  }
12382  m_pMetadata->Init(newSize);
12383 }
12384 
12385 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12386 {
12387  // This is the most important assert in the entire library.
12388  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12389  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12390 
12391  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12392  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12393  m_hMemory = VK_NULL_HANDLE;
12394 
12395  vma_delete(allocator, m_pMetadata);
12396  m_pMetadata = VMA_NULL;
12397 }
12398 
12399 bool VmaDeviceMemoryBlock::Validate() const
12400 {
12401  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12402  (m_pMetadata->GetSize() != 0));
12403 
12404  return m_pMetadata->Validate();
12405 }
12406 
12407 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12408 {
12409  void* pData = nullptr;
12410  VkResult res = Map(hAllocator, 1, &pData);
12411  if(res != VK_SUCCESS)
12412  {
12413  return res;
12414  }
12415 
12416  res = m_pMetadata->CheckCorruption(pData);
12417 
12418  Unmap(hAllocator, 1);
12419 
12420  return res;
12421 }
12422 
12423 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12424 {
12425  if(count == 0)
12426  {
12427  return VK_SUCCESS;
12428  }
12429 
12430  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12431  if(m_MapCount != 0)
12432  {
12433  m_MapCount += count;
12434  VMA_ASSERT(m_pMappedData != VMA_NULL);
12435  if(ppData != VMA_NULL)
12436  {
12437  *ppData = m_pMappedData;
12438  }
12439  return VK_SUCCESS;
12440  }
12441  else
12442  {
12443  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12444  hAllocator->m_hDevice,
12445  m_hMemory,
12446  0, // offset
12447  VK_WHOLE_SIZE,
12448  0, // flags
12449  &m_pMappedData);
12450  if(result == VK_SUCCESS)
12451  {
12452  if(ppData != VMA_NULL)
12453  {
12454  *ppData = m_pMappedData;
12455  }
12456  m_MapCount = count;
12457  }
12458  return result;
12459  }
12460 }
12461 
12462 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12463 {
12464  if(count == 0)
12465  {
12466  return;
12467  }
12468 
12469  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12470  if(m_MapCount >= count)
12471  {
12472  m_MapCount -= count;
12473  if(m_MapCount == 0)
12474  {
12475  m_pMappedData = VMA_NULL;
12476  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12477  }
12478  }
12479  else
12480  {
12481  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12482  }
12483 }
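// A minimal standalone sketch (not part of the library) of the
// reference-counted mapping used above: only the first Map() really maps the
// VkDeviceMemory and only the last Unmap() really unmaps it; intermediate
// calls just adjust the counter. All names here are hypothetical.
#include <cassert>
#include <cstdint>

struct ExampleRefCountedMapping
{
    uint32_t mapCount = 0;
    void* mappedPtr = nullptr;

    void* Map()
    {
        if(mapCount++ == 0)
            mappedPtr = PlatformMap(); // stands in for vkMapMemory
        return mappedPtr;
    }
    void Unmap()
    {
        assert(mapCount > 0 && "Unmap called more times than Map");
        if(--mapCount == 0)
        {
            PlatformUnmap(); // stands in for vkUnmapMemory
            mappedPtr = nullptr;
        }
    }

private:
    char m_Storage[64];
    void* PlatformMap() { return m_Storage; }
    void PlatformUnmap() {}
};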
12484 
12485 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12486 {
12487  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12488  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12489 
12490  void* pData;
12491  VkResult res = Map(hAllocator, 1, &pData);
12492  if(res != VK_SUCCESS)
12493  {
12494  return res;
12495  }
12496 
12497  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12498  VmaWriteMagicValue(pData, allocOffset + allocSize);
12499 
12500  Unmap(hAllocator, 1);
12501 
12502  return VK_SUCCESS;
12503 }
12504 
12505 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12506 {
12507  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12508  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12509 
12510  void* pData;
12511  VkResult res = Map(hAllocator, 1, &pData);
12512  if(res != VK_SUCCESS)
12513  {
12514  return res;
12515  }
12516 
12517  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12518  {
12519  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12520  }
12521  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12522  {
12523  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12524  }
12525 
12526  Unmap(hAllocator, 1);
12527 
12528  return VK_SUCCESS;
12529 }
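// A standalone sketch (not part of the library) of the margin-canary idea
// above: a known 32-bit pattern is written into the debug margins just
// before and just after an allocation and re-checked on free; a mismatch
// means something wrote past the allocation's bounds. The constant is an
// arbitrary example value.
#include <cstdint>
#include <cstring>

static const uint32_t EXAMPLE_MAGIC = 0xC0DEFACEu;

static void ExampleWriteMagic(void* blockData, uint64_t offset)
{
    memcpy(static_cast<char*>(blockData) + offset, &EXAMPLE_MAGIC, sizeof(EXAMPLE_MAGIC));
}

static bool ExampleValidateMagic(const void* blockData, uint64_t offset)
{
    uint32_t value;
    memcpy(&value, static_cast<const char*>(blockData) + offset, sizeof(value));
    return value == EXAMPLE_MAGIC;
}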
12530 
12531 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12532  const VmaAllocator hAllocator,
12533  const VmaAllocation hAllocation,
12534  VkDeviceSize allocationLocalOffset,
12535  VkBuffer hBuffer,
12536  const void* pNext)
12537 {
12538  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12539  hAllocation->GetBlock() == this);
12540  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12541  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12542  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12543  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12544  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12545  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12546 }
12547 
12548 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12549  const VmaAllocator hAllocator,
12550  const VmaAllocation hAllocation,
12551  VkDeviceSize allocationLocalOffset,
12552  VkImage hImage,
12553  const void* pNext)
12554 {
12555  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12556  hAllocation->GetBlock() == this);
12557  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12558  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12559  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12560  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12561  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12562  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12563 }
12564 
12565 static void InitStatInfo(VmaStatInfo& outInfo)
12566 {
12567  memset(&outInfo, 0, sizeof(outInfo));
12568  outInfo.allocationSizeMin = UINT64_MAX;
12569  outInfo.unusedRangeSizeMin = UINT64_MAX;
12570 }
12571 
12572 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12573 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12574 {
12575  inoutInfo.blockCount += srcInfo.blockCount;
12576  inoutInfo.allocationCount += srcInfo.allocationCount;
12577  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12578  inoutInfo.usedBytes += srcInfo.usedBytes;
12579  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12580  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12581  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12582  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12583  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12584 }
12585 
12586 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12587 {
12588  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12589  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12590  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12591  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12592 }
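// A standalone sketch (not part of the library) of the averaging step above,
// assuming VmaRoundDiv performs rounded integer division, i.e. (x + y/2) / y.
#include <cstdint>

static uint64_t ExampleRoundDiv(uint64_t x, uint64_t y)
{
    return (x + y / 2) / y;
}
// E.g. 1005 used bytes across 10 allocations average to
// ExampleRoundDiv(1005, 10) == 101, where plain division would truncate to 100.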
12593 
12594 VmaPool_T::VmaPool_T(
12595  VmaAllocator hAllocator,
12596  const VmaPoolCreateInfo& createInfo,
12597  VkDeviceSize preferredBlockSize) :
12598  m_BlockVector(
12599  hAllocator,
12600  this, // hParentPool
12601  createInfo.memoryTypeIndex,
12602  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12603  createInfo.minBlockCount,
12604  createInfo.maxBlockCount,
12605  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12606  createInfo.frameInUseCount,
12607  createInfo.blockSize != 0, // explicitBlockSize
12608  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
12609  createInfo.priority),
12610  m_Id(0),
12611  m_Name(VMA_NULL)
12612 {
12613 }
12614 
12615 VmaPool_T::~VmaPool_T()
12616 {
12617 }
12618 
12619 void VmaPool_T::SetName(const char* pName)
12620 {
12621  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12622  VmaFreeString(allocs, m_Name);
12623 
12624  if(pName != VMA_NULL)
12625  {
12626  m_Name = VmaCreateStringCopy(allocs, pName);
12627  }
12628  else
12629  {
12630  m_Name = VMA_NULL;
12631  }
12632 }
12633 
12634 #if VMA_STATS_STRING_ENABLED
12635 
12636 #endif // #if VMA_STATS_STRING_ENABLED
12637 
12638 VmaBlockVector::VmaBlockVector(
12639  VmaAllocator hAllocator,
12640  VmaPool hParentPool,
12641  uint32_t memoryTypeIndex,
12642  VkDeviceSize preferredBlockSize,
12643  size_t minBlockCount,
12644  size_t maxBlockCount,
12645  VkDeviceSize bufferImageGranularity,
12646  uint32_t frameInUseCount,
12647  bool explicitBlockSize,
12648  uint32_t algorithm,
12649  float priority) :
12650  m_hAllocator(hAllocator),
12651  m_hParentPool(hParentPool),
12652  m_MemoryTypeIndex(memoryTypeIndex),
12653  m_PreferredBlockSize(preferredBlockSize),
12654  m_MinBlockCount(minBlockCount),
12655  m_MaxBlockCount(maxBlockCount),
12656  m_BufferImageGranularity(bufferImageGranularity),
12657  m_FrameInUseCount(frameInUseCount),
12658  m_ExplicitBlockSize(explicitBlockSize),
12659  m_Algorithm(algorithm),
12660  m_Priority(priority),
12661  m_HasEmptyBlock(false),
12662  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12663  m_NextBlockId(0)
12664 {
12665 }
12666 
12667 VmaBlockVector::~VmaBlockVector()
12668 {
12669  for(size_t i = m_Blocks.size(); i--; )
12670  {
12671  m_Blocks[i]->Destroy(m_hAllocator);
12672  vma_delete(m_hAllocator, m_Blocks[i]);
12673  }
12674 }
12675 
12676 VkResult VmaBlockVector::CreateMinBlocks()
12677 {
12678  for(size_t i = 0; i < m_MinBlockCount; ++i)
12679  {
12680  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12681  if(res != VK_SUCCESS)
12682  {
12683  return res;
12684  }
12685  }
12686  return VK_SUCCESS;
12687 }
12688 
12689 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12690 {
12691  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12692 
12693  const size_t blockCount = m_Blocks.size();
12694 
12695  pStats->size = 0;
12696  pStats->unusedSize = 0;
12697  pStats->allocationCount = 0;
12698  pStats->unusedRangeCount = 0;
12699  pStats->unusedRangeSizeMax = 0;
12700  pStats->blockCount = blockCount;
12701 
12702  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12703  {
12704  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12705  VMA_ASSERT(pBlock);
12706  VMA_HEAVY_ASSERT(pBlock->Validate());
12707  pBlock->m_pMetadata->AddPoolStats(*pStats);
12708  }
12709 }
12710 
12711 bool VmaBlockVector::IsEmpty()
12712 {
12713  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12714  return m_Blocks.empty();
12715 }
12716 
12717 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12718 {
12719  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12720  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12721  (VMA_DEBUG_MARGIN > 0) &&
12722  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12723  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12724 }
12725 
12726 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12727 
12728 VkResult VmaBlockVector::Allocate(
12729  uint32_t currentFrameIndex,
12730  VkDeviceSize size,
12731  VkDeviceSize alignment,
12732  const VmaAllocationCreateInfo& createInfo,
12733  VmaSuballocationType suballocType,
12734  size_t allocationCount,
12735  VmaAllocation* pAllocations)
12736 {
12737  size_t allocIndex;
12738  VkResult res = VK_SUCCESS;
12739 
12740  if(IsCorruptionDetectionEnabled())
12741  {
12742  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12743  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12744  }
12745 
12746  {
12747  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12748  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12749  {
12750  res = AllocatePage(
12751  currentFrameIndex,
12752  size,
12753  alignment,
12754  createInfo,
12755  suballocType,
12756  pAllocations + allocIndex);
12757  if(res != VK_SUCCESS)
12758  {
12759  break;
12760  }
12761  }
12762  }
12763 
12764  if(res != VK_SUCCESS)
12765  {
12766  // Free all already created allocations.
12767  while(allocIndex--)
12768  {
12769  Free(pAllocations[allocIndex]);
12770  }
12771  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12772  }
12773 
12774  return res;
12775 }
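// A minimal standalone sketch (not part of the library) of the same
// all-or-nothing pattern used above: allocate items one by one and, on the
// first failure, free everything already created so the caller never sees a
// partially completed batch. The callables are hypothetical.
#include <cstddef>

template<typename Handle, typename AllocFn, typename FreeFn>
static bool ExampleAllocateBatch(Handle* handles, size_t count, AllocFn allocOne, FreeFn freeOne)
{
    size_t i = 0;
    for(; i < count; ++i)
    {
        if(!allocOne(handles[i]))
            break;
    }
    if(i == count)
        return true;  // every item succeeded
    while(i--)        // roll back the ones that succeeded
        freeOne(handles[i]);
    return false;
}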
12776 
12777 VkResult VmaBlockVector::AllocatePage(
12778  uint32_t currentFrameIndex,
12779  VkDeviceSize size,
12780  VkDeviceSize alignment,
12781  const VmaAllocationCreateInfo& createInfo,
12782  VmaSuballocationType suballocType,
12783  VmaAllocation* pAllocation)
12784 {
12785  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12786  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12787  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12788  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12789 
12790  VkDeviceSize freeMemory;
12791  {
12792  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12793  VmaBudget heapBudget = {};
12794  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12795  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12796  }
12797 
12798  const bool canFallbackToDedicated = !IsCustomPool();
12799  const bool canCreateNewBlock =
12800  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12801  (m_Blocks.size() < m_MaxBlockCount) &&
12802  (freeMemory >= size || !canFallbackToDedicated);
12803  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12804 
12805  // If linearAlgorithm is used, canMakeOtherLost is available only when used as a ring buffer,
12806  // which in turn is available only when maxBlockCount = 1.
12807  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12808  {
12809  canMakeOtherLost = false;
12810  }
12811 
12812  // Upper address can only be used with linear allocator and within single memory block.
12813  if(isUpperAddress &&
12814  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12815  {
12816  return VK_ERROR_FEATURE_NOT_PRESENT;
12817  }
12818 
12819  // Validate strategy.
12820  switch(strategy)
12821  {
12822  case 0:
12823  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
12824  break;
12825  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
12826  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
12827  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
12828  break;
12829  default:
12830  return VK_ERROR_FEATURE_NOT_PRESENT;
12831  }
12832 
12833  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12834  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12835  {
12836  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12837  }
12838 
12839  /*
12840  Under certain conditions, this whole section can be skipped as an optimization, so
12841  we move on directly to trying to allocate with canMakeOtherLost. That's the case
12842  e.g. for custom pools with the linear algorithm.
12843  */
12844  if(!canMakeOtherLost || canCreateNewBlock)
12845  {
12846  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12847  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12848  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
12849 
12850  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12851  {
12852  // Use only last block.
12853  if(!m_Blocks.empty())
12854  {
12855  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12856  VMA_ASSERT(pCurrBlock);
12857  VkResult res = AllocateFromBlock(
12858  pCurrBlock,
12859  currentFrameIndex,
12860  size,
12861  alignment,
12862  allocFlagsCopy,
12863  createInfo.pUserData,
12864  suballocType,
12865  strategy,
12866  pAllocation);
12867  if(res == VK_SUCCESS)
12868  {
12869  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12870  return VK_SUCCESS;
12871  }
12872  }
12873  }
12874  else
12875  {
12876  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12877  {
12878  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12879  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12880  {
12881  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12882  VMA_ASSERT(pCurrBlock);
12883  VkResult res = AllocateFromBlock(
12884  pCurrBlock,
12885  currentFrameIndex,
12886  size,
12887  alignment,
12888  allocFlagsCopy,
12889  createInfo.pUserData,
12890  suballocType,
12891  strategy,
12892  pAllocation);
12893  if(res == VK_SUCCESS)
12894  {
12895  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12896  return VK_SUCCESS;
12897  }
12898  }
12899  }
12900  else // WORST_FIT, FIRST_FIT
12901  {
12902  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12903  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12904  {
12905  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12906  VMA_ASSERT(pCurrBlock);
12907  VkResult res = AllocateFromBlock(
12908  pCurrBlock,
12909  currentFrameIndex,
12910  size,
12911  alignment,
12912  allocFlagsCopy,
12913  createInfo.pUserData,
12914  suballocType,
12915  strategy,
12916  pAllocation);
12917  if(res == VK_SUCCESS)
12918  {
12919  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12920  return VK_SUCCESS;
12921  }
12922  }
12923  }
12924  }
12925 
12926  // 2. Try to create new block.
12927  if(canCreateNewBlock)
12928  {
12929  // Calculate optimal size for new block.
12930  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12931  uint32_t newBlockSizeShift = 0;
12932  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12933 
12934  if(!m_ExplicitBlockSize)
12935  {
12936  // Allocate 1/8, 1/4, 1/2 as first blocks.
12937  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12938  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12939  {
12940  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12941  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12942  {
12943  newBlockSize = smallerNewBlockSize;
12944  ++newBlockSizeShift;
12945  }
12946  else
12947  {
12948  break;
12949  }
12950  }
12951  }
12952 
12953  size_t newBlockIndex = 0;
12954  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12955  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12956  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12957  if(!m_ExplicitBlockSize)
12958  {
12959  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12960  {
12961  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12962  if(smallerNewBlockSize >= size)
12963  {
12964  newBlockSize = smallerNewBlockSize;
12965  ++newBlockSizeShift;
12966  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12967  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12968  }
12969  else
12970  {
12971  break;
12972  }
12973  }
12974  }
12975 
12976  if(res == VK_SUCCESS)
12977  {
12978  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12979  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12980 
12981  res = AllocateFromBlock(
12982  pBlock,
12983  currentFrameIndex,
12984  size,
12985  alignment,
12986  allocFlagsCopy,
12987  createInfo.pUserData,
12988  suballocType,
12989  strategy,
12990  pAllocation);
12991  if(res == VK_SUCCESS)
12992  {
12993  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12994  return VK_SUCCESS;
12995  }
12996  else
12997  {
12998  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12999  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13000  }
13001  }
13002  }
13003  }
13004 
13005  // 3. Try to allocate from existing blocks with making other allocations lost.
13006  if(canMakeOtherLost)
13007  {
13008  uint32_t tryIndex = 0;
13009  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
13010  {
13011  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
13012  VmaAllocationRequest bestRequest = {};
13013  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
13014 
13015  // 1. Search existing allocations.
13016  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13017  {
13018  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13019  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13020  {
13021  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13022  VMA_ASSERT(pCurrBlock);
13023  VmaAllocationRequest currRequest = {};
13024  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13025  currentFrameIndex,
13026  m_FrameInUseCount,
13027  m_BufferImageGranularity,
13028  size,
13029  alignment,
13030  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13031  suballocType,
13032  canMakeOtherLost,
13033  strategy,
13034  &currRequest))
13035  {
13036  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13037  if(pBestRequestBlock == VMA_NULL ||
13038  currRequestCost < bestRequestCost)
13039  {
13040  pBestRequestBlock = pCurrBlock;
13041  bestRequest = currRequest;
13042  bestRequestCost = currRequestCost;
13043 
13044  if(bestRequestCost == 0)
13045  {
13046  break;
13047  }
13048  }
13049  }
13050  }
13051  }
13052  else // WORST_FIT, FIRST_FIT
13053  {
13054  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13055  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13056  {
13057  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13058  VMA_ASSERT(pCurrBlock);
13059  VmaAllocationRequest currRequest = {};
13060  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13061  currentFrameIndex,
13062  m_FrameInUseCount,
13063  m_BufferImageGranularity,
13064  size,
13065  alignment,
13066  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13067  suballocType,
13068  canMakeOtherLost,
13069  strategy,
13070  &currRequest))
13071  {
13072  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13073  if(pBestRequestBlock == VMA_NULL ||
13074  currRequestCost < bestRequestCost ||
13075  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13076  {
13077  pBestRequestBlock = pCurrBlock;
13078  bestRequest = currRequest;
13079  bestRequestCost = currRequestCost;
13080 
13081  if(bestRequestCost == 0 ||
13082  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13083  {
13084  break;
13085  }
13086  }
13087  }
13088  }
13089  }
13090 
13091  if(pBestRequestBlock != VMA_NULL)
13092  {
13093  if(mapped)
13094  {
13095  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
13096  if(res != VK_SUCCESS)
13097  {
13098  return res;
13099  }
13100  }
13101 
13102  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
13103  currentFrameIndex,
13104  m_FrameInUseCount,
13105  &bestRequest))
13106  {
13107  // Allocate from this pBlock.
13108  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13109  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
13110  UpdateHasEmptyBlock();
13111  (*pAllocation)->InitBlockAllocation(
13112  pBestRequestBlock,
13113  bestRequest.offset,
13114  alignment,
13115  size,
13116  m_MemoryTypeIndex,
13117  suballocType,
13118  mapped,
13119  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13120  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
13121  VMA_DEBUG_LOG(" Returned from existing block");
13122  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
13123  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13124  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13125  {
13126  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13127  }
13128  if(IsCorruptionDetectionEnabled())
13129  {
13130  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
13131  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13132  }
13133  return VK_SUCCESS;
13134  }
13135  // else: Some allocations must have been touched while we are here. Next try.
13136  }
13137  else
13138  {
13139  // Could not find place in any of the blocks - break outer loop.
13140  break;
13141  }
13142  }
13143  /* Maximum number of tries exceeded - a very unlikely event when many other
13144  threads are simultaneously touching allocations, making it impossible to make
13145  them lost at the same time as we try to allocate. */
13146  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
13147  {
13148  return VK_ERROR_TOO_MANY_OBJECTS;
13149  }
13150  }
13151 
13152  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13153 }
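// A standalone sketch (not part of the library) of the block sizing
// heuristic above: start small pools with 1/8, 1/4, 1/2 of the preferred
// block size, and only grow to the full size once existing blocks justify it.
#include <cstdint>

static uint64_t ExampleChooseNewBlockSize(uint64_t preferredBlockSize,
    uint64_t maxExistingBlockSize, uint64_t allocSize)
{
    uint64_t newBlockSize = preferredBlockSize;
    for(uint32_t i = 0; i < 3; ++i) // at most three halvings: 1/2, 1/4, 1/8
    {
        const uint64_t smaller = newBlockSize / 2;
        if(smaller > maxExistingBlockSize && smaller >= allocSize * 2)
            newBlockSize = smaller;
        else
            break;
    }
    return newBlockSize;
}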
13154 
13155 void VmaBlockVector::Free(
13156  const VmaAllocation hAllocation)
13157 {
13158  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
13159 
13160  bool budgetExceeded = false;
13161  {
13162  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13163  VmaBudget heapBudget = {};
13164  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13165  budgetExceeded = heapBudget.usage >= heapBudget.budget;
13166  }
13167 
13168  // Scope for lock.
13169  {
13170  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13171 
13172  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13173 
13174  if(IsCorruptionDetectionEnabled())
13175  {
13176  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
13177  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
13178  }
13179 
13180  if(hAllocation->IsPersistentMap())
13181  {
13182  pBlock->Unmap(m_hAllocator, 1);
13183  }
13184 
13185  pBlock->m_pMetadata->Free(hAllocation);
13186  VMA_HEAVY_ASSERT(pBlock->Validate());
13187 
13188  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
13189 
13190  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
13191  // pBlock became empty after this deallocation.
13192  if(pBlock->m_pMetadata->IsEmpty())
13193  {
13194  // Already has empty block. We don't want to have two, so delete this one.
13195  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
13196  {
13197  pBlockToDelete = pBlock;
13198  Remove(pBlock);
13199  }
13200  // else: We now have an empty block - leave it.
13201  }
13202  // pBlock didn't become empty, but we have another empty block - find and free that one.
13203  // (This is optional, heuristics.)
13204  else if(m_HasEmptyBlock && canDeleteBlock)
13205  {
13206  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
13207  if(pLastBlock->m_pMetadata->IsEmpty())
13208  {
13209  pBlockToDelete = pLastBlock;
13210  m_Blocks.pop_back();
13211  }
13212  }
13213 
13214  UpdateHasEmptyBlock();
13215  IncrementallySortBlocks();
13216  }
13217 
13218  // Destruction of a free block. Deferred until this point, outside of mutex
13219  // lock, for performance reasons.
13220  if(pBlockToDelete != VMA_NULL)
13221  {
13222  VMA_DEBUG_LOG(" Deleted empty block");
13223  pBlockToDelete->Destroy(m_hAllocator);
13224  vma_delete(m_hAllocator, pBlockToDelete);
13225  }
13226 }
13227 
13228 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
13229 {
13230  VkDeviceSize result = 0;
13231  for(size_t i = m_Blocks.size(); i--; )
13232  {
13233  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
13234  if(result >= m_PreferredBlockSize)
13235  {
13236  break;
13237  }
13238  }
13239  return result;
13240 }
13241 
13242 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13243 {
13244  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13245  {
13246  if(m_Blocks[blockIndex] == pBlock)
13247  {
13248  VmaVectorRemove(m_Blocks, blockIndex);
13249  return;
13250  }
13251  }
13252  VMA_ASSERT(0);
13253 }
13254 
13255 void VmaBlockVector::IncrementallySortBlocks()
13256 {
13257  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13258  {
13259  // Bubble sort only until first swap.
13260  for(size_t i = 1; i < m_Blocks.size(); ++i)
13261  {
13262  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13263  {
13264  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13265  return;
13266  }
13267  }
13268  }
13269 }
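// A standalone sketch (not part of the library) of the incremental sort
// above: one adjacent swap per call keeps each Free() cheap while the block
// list still converges toward ascending free-space order over many calls.
#include <cstddef>
#include <utility>
#include <vector>

static void ExampleIncrementalSortStep(std::vector<int>& keys)
{
    for(size_t i = 1; i < keys.size(); ++i)
    {
        if(keys[i - 1] > keys[i])
        {
            std::swap(keys[i - 1], keys[i]);
            return; // bubble sort only until the first swap
        }
    }
}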
13270 
13271 VkResult VmaBlockVector::AllocateFromBlock(
13272  VmaDeviceMemoryBlock* pBlock,
13273  uint32_t currentFrameIndex,
13274  VkDeviceSize size,
13275  VkDeviceSize alignment,
13276  VmaAllocationCreateFlags allocFlags,
13277  void* pUserData,
13278  VmaSuballocationType suballocType,
13279  uint32_t strategy,
13280  VmaAllocation* pAllocation)
13281 {
13282  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13283  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13284  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13285  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13286 
13287  VmaAllocationRequest currRequest = {};
13288  if(pBlock->m_pMetadata->CreateAllocationRequest(
13289  currentFrameIndex,
13290  m_FrameInUseCount,
13291  m_BufferImageGranularity,
13292  size,
13293  alignment,
13294  isUpperAddress,
13295  suballocType,
13296  false, // canMakeOtherLost
13297  strategy,
13298  &currRequest))
13299  {
13300  // Allocate from pCurrBlock.
13301  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13302 
13303  if(mapped)
13304  {
13305  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13306  if(res != VK_SUCCESS)
13307  {
13308  return res;
13309  }
13310  }
13311 
13312  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13313  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13314  UpdateHasEmptyBlock();
13315  (*pAllocation)->InitBlockAllocation(
13316  pBlock,
13317  currRequest.offset,
13318  alignment,
13319  size,
13320  m_MemoryTypeIndex,
13321  suballocType,
13322  mapped,
13323  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13324  VMA_HEAVY_ASSERT(pBlock->Validate());
13325  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13326  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13327  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13328  {
13329  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13330  }
13331  if(IsCorruptionDetectionEnabled())
13332  {
13333  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13334  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13335  }
13336  return VK_SUCCESS;
13337  }
13338  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13339 }
13340 
13341 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13342 {
13343  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13344  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13345  allocInfo.allocationSize = blockSize;
13346 
13347 #if VMA_BUFFER_DEVICE_ADDRESS
13348  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13349  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13350  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13351  {
13352  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13353  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13354  }
13355 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13356 
13357 #if VMA_MEMORY_PRIORITY
13358  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
13359  if(m_hAllocator->m_UseExtMemoryPriority)
13360  {
13361  priorityInfo.priority = m_Priority;
13362  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
13363  }
13364 #endif // #if VMA_MEMORY_PRIORITY
13365 
13366  VkDeviceMemory mem = VK_NULL_HANDLE;
13367  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13368  if(res < 0)
13369  {
13370  return res;
13371  }
13372 
13373  // New VkDeviceMemory successfully created.
13374 
13375  // Create new Allocation for it.
13376  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13377  pBlock->Init(
13378  m_hAllocator,
13379  m_hParentPool,
13380  m_MemoryTypeIndex,
13381  mem,
13382  allocInfo.allocationSize,
13383  m_NextBlockId++,
13384  m_Algorithm);
13385 
13386  m_Blocks.push_back(pBlock);
13387  if(pNewBlockIndex != VMA_NULL)
13388  {
13389  *pNewBlockIndex = m_Blocks.size() - 1;
13390  }
13391 
13392  return VK_SUCCESS;
13393 }
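// A minimal sketch (not part of the library) of what VmaPnextChainPushFront
// is assumed to do above: link an extension struct into the front of a
// Vulkan pNext chain so the driver sees it during vkAllocateMemory.
template<typename MainT, typename NewT>
static void ExamplePnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext; // preserve whatever was chained before
    mainStruct->pNext = newStruct;        // the new struct becomes the head
}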
13394 
13395 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13396  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13397  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13398 {
13399  const size_t blockCount = m_Blocks.size();
13400  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13401 
13402  enum BLOCK_FLAG
13403  {
13404  BLOCK_FLAG_USED = 0x00000001,
13405  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13406  };
13407 
13408  struct BlockInfo
13409  {
13410  uint32_t flags;
13411  void* pMappedData;
13412  };
13413  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13414  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13415  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13416 
13417  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13418  const size_t moveCount = moves.size();
13419  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13420  {
13421  const VmaDefragmentationMove& move = moves[moveIndex];
13422  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13423  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13424  }
13425 
13426  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13427 
13428  // Go over all blocks. Get mapped pointer or map if necessary.
13429  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13430  {
13431  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13432  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13433  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13434  {
13435  currBlockInfo.pMappedData = pBlock->GetMappedData();
13436  // It is not originally mapped - map it.
13437  if(currBlockInfo.pMappedData == VMA_NULL)
13438  {
13439  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13440  if(pDefragCtx->res == VK_SUCCESS)
13441  {
13442  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13443  }
13444  }
13445  }
13446  }
13447 
13448  // Go over all moves. Do actual data transfer.
13449  if(pDefragCtx->res == VK_SUCCESS)
13450  {
13451  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13452  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13453 
13454  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13455  {
13456  const VmaDefragmentationMove& move = moves[moveIndex];
13457 
13458  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13459  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13460 
13461  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13462 
13463  // Invalidate source.
13464  if(isNonCoherent)
13465  {
13466  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13467  memRange.memory = pSrcBlock->GetDeviceMemory();
13468  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13469  memRange.size = VMA_MIN(
13470  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13471  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13472  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13473  }
13474 
13475  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13476  memmove(
13477  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13478  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13479  static_cast<size_t>(move.size));
13480 
13481  if(IsCorruptionDetectionEnabled())
13482  {
13483  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13484  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13485  }
13486 
13487  // Flush destination.
13488  if(isNonCoherent)
13489  {
13490  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13491  memRange.memory = pDstBlock->GetDeviceMemory();
13492  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13493  memRange.size = VMA_MIN(
13494  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13495  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13496  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13497  }
13498  }
13499  }
13500 
13501  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
13502  // Regardless of pCtx->res == VK_SUCCESS.
13503  for(size_t blockIndex = blockCount; blockIndex--; )
13504  {
13505  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13506  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13507  {
13508  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13509  pBlock->Unmap(m_hAllocator, 1);
13510  }
13511  }
13512 }
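// A standalone sketch (not part of the library) of the range arithmetic used
// above for non-coherent memory: round the offset down and the end up to
// nonCoherentAtomSize, then clamp to the block size, as
// vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges require.
#include <algorithm>
#include <cstdint>

static void ExampleAlignFlushRange(uint64_t offset, uint64_t size,
    uint64_t atomSize, uint64_t blockSize,
    uint64_t& outOffset, uint64_t& outSize)
{
    outOffset = offset / atomSize * atomSize;                                         // align down
    const uint64_t alignedEnd = (offset + size + atomSize - 1) / atomSize * atomSize; // align up
    outSize = std::min(alignedEnd, blockSize) - outOffset;
}
// E.g. offset 100, size 50, atomSize 64 -> range [64, 192), size 128.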
13513 
13514 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13515  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13516  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13517  VkCommandBuffer commandBuffer)
13518 {
13519  const size_t blockCount = m_Blocks.size();
13520 
13521  pDefragCtx->blockContexts.resize(blockCount);
13522  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13523 
13524  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13525  const size_t moveCount = moves.size();
13526  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13527  {
13528  const VmaDefragmentationMove& move = moves[moveIndex];
13529 
13530  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13531  {
13532  // Old-school moves still require us to map the whole block.
13533  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13534  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13535  }
13536  }
13537 
13538  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13539 
13540  // Go over all blocks. Create and bind buffer for whole block if necessary.
13541  {
13542  VkBufferCreateInfo bufCreateInfo;
13543  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13544 
13545  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13546  {
13547  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13548  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13549  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13550  {
13551  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13552  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13553  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13554  if(pDefragCtx->res == VK_SUCCESS)
13555  {
13556  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13557  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13558  }
13559  }
13560  }
13561  }
13562 
13563  // Go over all moves. Post data transfer commands to command buffer.
13564  if(pDefragCtx->res == VK_SUCCESS)
13565  {
13566  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13567  {
13568  const VmaDefragmentationMove& move = moves[moveIndex];
13569 
13570  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13571  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13572 
13573  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13574 
13575  VkBufferCopy region = {
13576  move.srcOffset,
13577  move.dstOffset,
13578  move.size };
13579  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13580  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13581  }
13582  }
13583 
13584  // Save buffers to defrag context for later destruction.
13585  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13586  {
13587  pDefragCtx->res = VK_NOT_READY;
13588  }
13589 }
13590 
13591 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13592 {
13593  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13594  {
13595  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13596  if(pBlock->m_pMetadata->IsEmpty())
13597  {
13598  if(m_Blocks.size() > m_MinBlockCount)
13599  {
13600  if(pDefragmentationStats != VMA_NULL)
13601  {
13602  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13603  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13604  }
13605 
13606  VmaVectorRemove(m_Blocks, blockIndex);
13607  pBlock->Destroy(m_hAllocator);
13608  vma_delete(m_hAllocator, pBlock);
13609  }
13610  else
13611  {
13612  break;
13613  }
13614  }
13615  }
13616  UpdateHasEmptyBlock();
13617 }
13618 
13619 void VmaBlockVector::UpdateHasEmptyBlock()
13620 {
13621  m_HasEmptyBlock = false;
13622  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13623  {
13624  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13625  if(pBlock->m_pMetadata->IsEmpty())
13626  {
13627  m_HasEmptyBlock = true;
13628  break;
13629  }
13630  }
13631 }
13632 
13633 #if VMA_STATS_STRING_ENABLED
13634 
13635 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13636 {
13637  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13638 
13639  json.BeginObject();
13640 
13641  if(IsCustomPool())
13642  {
13643  const char* poolName = m_hParentPool->GetName();
13644  if(poolName != VMA_NULL && poolName[0] != '\0')
13645  {
13646  json.WriteString("Name");
13647  json.WriteString(poolName);
13648  }
13649 
13650  json.WriteString("MemoryTypeIndex");
13651  json.WriteNumber(m_MemoryTypeIndex);
13652 
13653  json.WriteString("BlockSize");
13654  json.WriteNumber(m_PreferredBlockSize);
13655 
13656  json.WriteString("BlockCount");
13657  json.BeginObject(true);
13658  if(m_MinBlockCount > 0)
13659  {
13660  json.WriteString("Min");
13661  json.WriteNumber((uint64_t)m_MinBlockCount);
13662  }
13663  if(m_MaxBlockCount < SIZE_MAX)
13664  {
13665  json.WriteString("Max");
13666  json.WriteNumber((uint64_t)m_MaxBlockCount);
13667  }
13668  json.WriteString("Cur");
13669  json.WriteNumber((uint64_t)m_Blocks.size());
13670  json.EndObject();
13671 
13672  if(m_FrameInUseCount > 0)
13673  {
13674  json.WriteString("FrameInUseCount");
13675  json.WriteNumber(m_FrameInUseCount);
13676  }
13677 
13678  if(m_Algorithm != 0)
13679  {
13680  json.WriteString("Algorithm");
13681  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13682  }
13683  }
13684  else
13685  {
13686  json.WriteString("PreferredBlockSize");
13687  json.WriteNumber(m_PreferredBlockSize);
13688  }
13689 
13690  json.WriteString("Blocks");
13691  json.BeginObject();
13692  for(size_t i = 0; i < m_Blocks.size(); ++i)
13693  {
13694  json.BeginString();
13695  json.ContinueString(m_Blocks[i]->GetId());
13696  json.EndString();
13697 
13698  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13699  }
13700  json.EndObject();
13701 
13702  json.EndObject();
13703 }
13704 
13705 #endif // #if VMA_STATS_STRING_ENABLED
13706 
13707 void VmaBlockVector::Defragment(
13708  class VmaBlockVectorDefragmentationContext* pCtx,
13709  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
13710  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13711  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13712  VkCommandBuffer commandBuffer)
13713 {
13714  pCtx->res = VK_SUCCESS;
13715 
13716  const VkMemoryPropertyFlags memPropFlags =
13717  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13718  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13719 
13720  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13721  isHostVisible;
13722  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13723  !IsCorruptionDetectionEnabled() &&
13724  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13725 
13726  // There are options to defragment this memory type.
13727  if(canDefragmentOnCpu || canDefragmentOnGpu)
13728  {
13729  bool defragmentOnGpu;
13730  // There is only one option to defragment this memory type.
13731  if(canDefragmentOnGpu != canDefragmentOnCpu)
13732  {
13733  defragmentOnGpu = canDefragmentOnGpu;
13734  }
13735  // Both options are available: Heuristics to choose the best one.
13736  else
13737  {
13738  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13739  m_hAllocator->IsIntegratedGpu();
13740  }
13741 
13742  bool overlappingMoveSupported = !defragmentOnGpu;
13743 
13744  if(m_hAllocator->m_UseMutex)
13745  {
13746  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13747  {
13748  if(!m_Mutex.TryLockWrite())
13749  {
13750  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13751  return;
13752  }
13753  }
13754  else
13755  {
13756  m_Mutex.LockWrite();
13757  pCtx->mutexLocked = true;
13758  }
13759  }
13760 
13761  pCtx->Begin(overlappingMoveSupported, flags);
13762 
13763  // Defragment.
13764 
13765  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13766  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13767  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13768 
13769  // Accumulate statistics.
13770  if(pStats != VMA_NULL)
13771  {
13772  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13773  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13774  pStats->bytesMoved += bytesMoved;
13775  pStats->allocationsMoved += allocationsMoved;
13776  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13777  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13778  if(defragmentOnGpu)
13779  {
13780  maxGpuBytesToMove -= bytesMoved;
13781  maxGpuAllocationsToMove -= allocationsMoved;
13782  }
13783  else
13784  {
13785  maxCpuBytesToMove -= bytesMoved;
13786  maxCpuAllocationsToMove -= allocationsMoved;
13787  }
13788  }
13789 
13790  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13791  {
13792  if(m_hAllocator->m_UseMutex)
13793  m_Mutex.UnlockWrite();
13794 
13795  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13796  pCtx->res = VK_NOT_READY;
13797 
13798  return;
13799  }
13800 
13801  if(pCtx->res >= VK_SUCCESS)
13802  {
13803  if(defragmentOnGpu)
13804  {
13805  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13806  }
13807  else
13808  {
13809  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13810  }
13811  }
13812  }
13813 }
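/*
Illustrative sketch (not part of the library source): the function above is
normally reached through the public defragmentation API. The identifiers
`allocator`, `allocations` and `allocCount` are assumptions for the example.
Leaving the GPU limits at zero and commandBuffer at VK_NULL_HANDLE disables
the GPU path, so only host-visible memory is defragmented on the CPU.
*/
#if 0
static void ExampleDefragmentCpuOnly(
    VmaAllocator allocator,
    VmaAllocation* allocations,
    uint32_t allocCount)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocCount;
    info.pAllocations = allocations;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;    // No limit on bytes moved on CPU.
    info.maxCpuAllocationsToMove = UINT32_MAX; // No limit on allocation count.

    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, VMA_NULL, &defragCtx);
    if(res >= VK_SUCCESS)
    {
        // Buffers/images bound to moved allocations must be recreated by the caller.
        vmaDefragmentationEnd(allocator, defragCtx);
    }
}
#endif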
13814 
13815 void VmaBlockVector::DefragmentationEnd(
13816  class VmaBlockVectorDefragmentationContext* pCtx,
13817  uint32_t flags,
13818  VmaDefragmentationStats* pStats)
13819 {
13820  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
13821  {
13822  VMA_ASSERT(pCtx->mutexLocked == false);
13823 
13824  // Incremental defragmentation doesn't hold the lock between passes, so nothing
13825  // protects us on entry. Since we mutate state below, we must take the lock now.
13826  m_Mutex.LockWrite();
13827  pCtx->mutexLocked = true;
13828  }
13829 
13830  // If the mutex isn't locked, we didn't do any work and there is nothing to delete.
13831  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
13832  {
13833  // Destroy buffers.
13834  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
13835  {
13836  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
13837  if(blockCtx.hBuffer)
13838  {
13839  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13840  }
13841  }
13842 
13843  if(pCtx->res >= VK_SUCCESS)
13844  {
13845  FreeEmptyBlocks(pStats);
13846  }
13847  }
13848 
13849  if(pCtx->mutexLocked)
13850  {
13851  VMA_ASSERT(m_hAllocator->m_UseMutex);
13852  m_Mutex.UnlockWrite();
13853  }
13854 }
13855 
13856 uint32_t VmaBlockVector::ProcessDefragmentations(
13857  class VmaBlockVectorDefragmentationContext *pCtx,
13858  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13859 {
13860  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13861 
13862  const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13863 
13864  for(uint32_t i = 0; i < moveCount; ++ i)
13865  {
13866  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13867 
13868  pMove->allocation = move.hAllocation;
13869  pMove->memory = move.pDstBlock->GetDeviceMemory();
13870  pMove->offset = move.dstOffset;
13871 
13872  ++ pMove;
13873  }
13874 
13875  pCtx->defragmentationMovesProcessed += moveCount;
13876 
13877  return moveCount;
13878 }
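/*
Illustrative sketch (assumption, not library code): the caller of a
defragmentation pass consumes the VmaDefragmentationPassMoveInfo entries
filled in above by copying each allocation's data to its new home before the
pass is ended, e.g.:

    for(uint32_t i = 0; i < passInfo.moveCount; ++i)
    {
        const VmaDefragmentationPassMoveInfo& m = passInfo.pMoves[i];
        // Copy the data of m.allocation to m.memory at m.offset,
        // e.g. with vkCmdCopyBuffer, or memcpy for mapped host-visible memory.
    }
*/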
13879 
13880 void VmaBlockVector::CommitDefragmentations(
13881  class VmaBlockVectorDefragmentationContext *pCtx,
13882  VmaDefragmentationStats* pStats)
13883 {
13884  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13885 
13886  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13887  {
13888  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13889 
13890  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13891  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13892  }
13893 
13894  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13895  FreeEmptyBlocks(pStats);
13896 }
13897 
13898 size_t VmaBlockVector::CalcAllocationCount() const
13899 {
13900  size_t result = 0;
13901  for(size_t i = 0; i < m_Blocks.size(); ++i)
13902  {
13903  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13904  }
13905  return result;
13906 }
13907 
13908 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13909 {
13910  if(m_BufferImageGranularity == 1)
13911  {
13912  return false;
13913  }
13914  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13915  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13916  {
13917  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13918  VMA_ASSERT(m_Algorithm == 0);
13919  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13920  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13921  {
13922  return true;
13923  }
13924  }
13925  return false;
13926 }
13927 
13928 void VmaBlockVector::MakePoolAllocationsLost(
13929  uint32_t currentFrameIndex,
13930  size_t* pLostAllocationCount)
13931 {
13932  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13933  size_t lostAllocationCount = 0;
13934  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13935  {
13936  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13937  VMA_ASSERT(pBlock);
13938  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13939  }
13940  if(pLostAllocationCount != VMA_NULL)
13941  {
13942  *pLostAllocationCount = lostAllocationCount;
13943  }
13944 }
13945 
13946 VkResult VmaBlockVector::CheckCorruption()
13947 {
13948  if(!IsCorruptionDetectionEnabled())
13949  {
13950  return VK_ERROR_FEATURE_NOT_PRESENT;
13951  }
13952 
13953  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13954  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13955  {
13956  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13957  VMA_ASSERT(pBlock);
13958  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13959  if(res != VK_SUCCESS)
13960  {
13961  return res;
13962  }
13963  }
13964  return VK_SUCCESS;
13965 }
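/*
Illustrative sketch (assumption): corruption checks are normally triggered
through the public entry point, which fans out to this function for every
requested memory type:

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
    // VK_SUCCESS means all validated margins were intact;
    // VK_ERROR_FEATURE_NOT_PRESENT means no requested memory type had
    // corruption detection enabled.
*/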
13966 
13967 void VmaBlockVector::AddStats(VmaStats* pStats)
13968 {
13969  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13970  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13971 
13972  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13973 
13974  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13975  {
13976  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13977  VMA_ASSERT(pBlock);
13978  VMA_HEAVY_ASSERT(pBlock->Validate());
13979  VmaStatInfo allocationStatInfo;
13980  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13981  VmaAddStatInfo(pStats->total, allocationStatInfo);
13982  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13983  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13984  }
13985 }
13986 
13987 ////////////////////////////////////////////////////////////////////////////////
13988 // VmaDefragmentationAlgorithm_Generic members definition
13989 
13990 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13991  VmaAllocator hAllocator,
13992  VmaBlockVector* pBlockVector,
13993  uint32_t currentFrameIndex,
13994  bool overlappingMoveSupported) :
13995  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13996  m_AllocationCount(0),
13997  m_AllAllocations(false),
13998  m_BytesMoved(0),
13999  m_AllocationsMoved(0),
14000  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
14001 {
14002  // Create block info for each block.
14003  const size_t blockCount = m_pBlockVector->m_Blocks.size();
14004  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14005  {
14006  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
14007  pBlockInfo->m_OriginalBlockIndex = blockIndex;
14008  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
14009  m_Blocks.push_back(pBlockInfo);
14010  }
14011 
14012  // Sort them by m_pBlock pointer value.
14013  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
14014 }
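// Note: keeping m_Blocks sorted by raw block pointer lets AddAllocation()
// below locate a block's BlockInfo with a binary search
// (VmaBinaryFindFirstNotLess) instead of a linear scan.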
14015 
14016 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
14017 {
14018  for(size_t i = m_Blocks.size(); i--; )
14019  {
14020  vma_delete(m_hAllocator, m_Blocks[i]);
14021  }
14022 }
14023 
14024 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14025 {
14026  // Now that we hold VmaBlockVector::m_Mutex, we can make a final check whether this allocation was lost.
14027  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
14028  {
14029  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
14030  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
14031  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
14032  {
14033  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
14034  (*it)->m_Allocations.push_back(allocInfo);
14035  }
14036  else
14037  {
14038  VMA_ASSERT(0);
14039  }
14040 
14041  ++m_AllocationCount;
14042  }
14043 }
14044 
14045 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
14046  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14047  VkDeviceSize maxBytesToMove,
14048  uint32_t maxAllocationsToMove,
14049  bool freeOldAllocations)
14050 {
14051  if(m_Blocks.empty())
14052  {
14053  return VK_SUCCESS;
14054  }
14055 
14056  // This is a choice based on research.
14057  // Option 1:
14058  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
14059  // Option 2:
14060  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
14061  // Option 3:
14062  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
14063 
14064  size_t srcBlockMinIndex = 0;
14065  // With FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
14066  /*
14067  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
14068  {
14069  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
14070  if(blocksWithNonMovableCount > 0)
14071  {
14072  srcBlockMinIndex = blocksWithNonMovableCount - 1;
14073  }
14074  }
14075  */
14076 
14077  size_t srcBlockIndex = m_Blocks.size() - 1;
14078  size_t srcAllocIndex = SIZE_MAX;
14079  for(;;)
14080  {
14081  // 1. Find next allocation to move.
14082  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
14083  // 1.2. Then start from last to first m_Allocations.
14084  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
14085  {
14086  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
14087  {
14088  // Finished: no more allocations to process.
14089  if(srcBlockIndex == srcBlockMinIndex)
14090  {
14091  return VK_SUCCESS;
14092  }
14093  else
14094  {
14095  --srcBlockIndex;
14096  srcAllocIndex = SIZE_MAX;
14097  }
14098  }
14099  else
14100  {
14101  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
14102  }
14103  }
14104 
14105  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
14106  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
14107 
14108  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
14109  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
14110  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
14111  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
14112 
14113  // 2. Try to find new place for this allocation in preceding or current block.
14114  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
14115  {
14116  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
14117  VmaAllocationRequest dstAllocRequest;
14118  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
14119  m_CurrentFrameIndex,
14120  m_pBlockVector->GetFrameInUseCount(),
14121  m_pBlockVector->GetBufferImageGranularity(),
14122  size,
14123  alignment,
14124  false, // upperAddress
14125  suballocType,
14126  false, // canMakeOtherLost
14127  strategy,
14128  &dstAllocRequest) &&
14129  MoveMakesSense(
14130  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
14131  {
14132  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
14133 
14134  // Reached limit on number of allocations or bytes to move.
14135  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
14136  (m_BytesMoved + size > maxBytesToMove))
14137  {
14138  return VK_SUCCESS;
14139  }
14140 
14141  VmaDefragmentationMove move = {};
14142  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
14143  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
14144  move.srcOffset = srcOffset;
14145  move.dstOffset = dstAllocRequest.offset;
14146  move.size = size;
14147  move.hAllocation = allocInfo.m_hAllocation;
14148  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
14149  move.pDstBlock = pDstBlockInfo->m_pBlock;
14150 
14151  moves.push_back(move);
14152 
14153  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
14154  dstAllocRequest,
14155  suballocType,
14156  size,
14157  allocInfo.m_hAllocation);
14158 
14159  if(freeOldAllocations)
14160  {
14161  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
14162  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
14163  }
14164 
14165  if(allocInfo.m_pChanged != VMA_NULL)
14166  {
14167  *allocInfo.m_pChanged = VK_TRUE;
14168  }
14169 
14170  ++m_AllocationsMoved;
14171  m_BytesMoved += size;
14172 
14173  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
14174 
14175  break;
14176  }
14177  }
14178 
14179  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
14180 
14181  if(srcAllocIndex > 0)
14182  {
14183  --srcAllocIndex;
14184  }
14185  else
14186  {
14187  if(srcBlockIndex > 0)
14188  {
14189  --srcBlockIndex;
14190  srcAllocIndex = SIZE_MAX;
14191  }
14192  else
14193  {
14194  return VK_SUCCESS;
14195  }
14196  }
14197  }
14198 }
14199 
14200 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
14201 {
14202  size_t result = 0;
14203  for(size_t i = 0; i < m_Blocks.size(); ++i)
14204  {
14205  if(m_Blocks[i]->m_HasNonMovableAllocations)
14206  {
14207  ++result;
14208  }
14209  }
14210  return result;
14211 }
14212 
14213 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
14214  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14215  VkDeviceSize maxBytesToMove,
14216  uint32_t maxAllocationsToMove,
14217  VmaDefragmentationFlags flags)
14218 {
14219  if(!m_AllAllocations && m_AllocationCount == 0)
14220  {
14221  return VK_SUCCESS;
14222  }
14223 
14224  const size_t blockCount = m_Blocks.size();
14225  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14226  {
14227  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
14228 
14229  if(m_AllAllocations)
14230  {
14231  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
14232  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
14233  it != pMetadata->m_Suballocations.end();
14234  ++it)
14235  {
14236  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
14237  {
14238  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
14239  pBlockInfo->m_Allocations.push_back(allocInfo);
14240  }
14241  }
14242  }
14243 
14244  pBlockInfo->CalcHasNonMovableAllocations();
14245 
14246  // This is a choice based on research.
14247  // Option 1:
14248  pBlockInfo->SortAllocationsByOffsetDescending();
14249  // Option 2:
14250  //pBlockInfo->SortAllocationsBySizeDescending();
14251  }
14252 
14253  // Sort m_Blocks, this time by the main criterion: from most "destination" to most "source" blocks.
14254  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14255 
14256  // This is a choice based on research.
14257  const uint32_t roundCount = 2;
14258 
14259  // Execute defragmentation rounds (the main part).
14260  VkResult result = VK_SUCCESS;
14261  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14262  {
14263  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14264  }
14265 
14266  return result;
14267 }
14268 
14269 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14270  size_t dstBlockIndex, VkDeviceSize dstOffset,
14271  size_t srcBlockIndex, VkDeviceSize srcOffset)
14272 {
14273  if(dstBlockIndex < srcBlockIndex)
14274  {
14275  return true;
14276  }
14277  if(dstBlockIndex > srcBlockIndex)
14278  {
14279  return false;
14280  }
14281  if(dstOffset < srcOffset)
14282  {
14283  return true;
14284  }
14285  return false;
14286 }
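// In effect this is a lexicographic "(blockIndex, offset) must strictly
// decrease" test. With hypothetical values: moving from block 2, offset 4096
// to block 0 (any offset) or to block 2, offset 0 makes sense; moving to
// block 2, offset 8192 or to block 3 does not.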
14287 
14288 ////////////////////////////////////////////////////////////////////////////////
14289 // VmaDefragmentationAlgorithm_Fast
14290 
14291 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14292  VmaAllocator hAllocator,
14293  VmaBlockVector* pBlockVector,
14294  uint32_t currentFrameIndex,
14295  bool overlappingMoveSupported) :
14296  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14297  m_OverlappingMoveSupported(overlappingMoveSupported),
14298  m_AllocationCount(0),
14299  m_AllAllocations(false),
14300  m_BytesMoved(0),
14301  m_AllocationsMoved(0),
14302  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14303 {
14304  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14305 
14306 }
14307 
14308 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14309 {
14310 }
14311 
14312 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14313  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14314  VkDeviceSize maxBytesToMove,
14315  uint32_t maxAllocationsToMove,
14316  VmaDefragmentationFlags flags)
14317 {
14318  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14319 
14320  const size_t blockCount = m_pBlockVector->GetBlockCount();
14321  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14322  {
14323  return VK_SUCCESS;
14324  }
14325 
14326  PreprocessMetadata();
14327 
14328  // Sort blocks from most "destination" (least free space) to most "source".
14329 
14330  m_BlockInfos.resize(blockCount);
14331  for(size_t i = 0; i < blockCount; ++i)
14332  {
14333  m_BlockInfos[i].origBlockIndex = i;
14334  }
14335 
14336  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14337  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14338  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14339  });
14340 
14341  // THE MAIN ALGORITHM
14342 
14343  FreeSpaceDatabase freeSpaceDb;
14344 
14345  size_t dstBlockInfoIndex = 0;
14346  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14347  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14348  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14349  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14350  VkDeviceSize dstOffset = 0;
14351 
14352  bool end = false;
14353  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14354  {
14355  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14356  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14357  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14358  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14359  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14360  {
14361  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14362  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14363  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14364  if(m_AllocationsMoved == maxAllocationsToMove ||
14365  m_BytesMoved + srcAllocSize > maxBytesToMove)
14366  {
14367  end = true;
14368  break;
14369  }
14370  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14371 
14372  VmaDefragmentationMove move = {};
14373  // Try to place it in one of free spaces from the database.
14374  size_t freeSpaceInfoIndex;
14375  VkDeviceSize dstAllocOffset;
14376  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14377  freeSpaceInfoIndex, dstAllocOffset))
14378  {
14379  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14380  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14381  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14382 
14383  // Same block
14384  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14385  {
14386  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14387 
14388  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14389 
14390  VmaSuballocation suballoc = *srcSuballocIt;
14391  suballoc.offset = dstAllocOffset;
14392  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14393  m_BytesMoved += srcAllocSize;
14394  ++m_AllocationsMoved;
14395 
14396  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14397  ++nextSuballocIt;
14398  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14399  srcSuballocIt = nextSuballocIt;
14400 
14401  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14402 
14403  move.srcBlockIndex = srcOrigBlockIndex;
14404  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14405  move.srcOffset = srcAllocOffset;
14406  move.dstOffset = dstAllocOffset;
14407  move.size = srcAllocSize;
14408 
14409  moves.push_back(move);
14410  }
14411  // Different block
14412  else
14413  {
14414  // MOVE OPTION 2: Move the allocation to a different block.
14415 
14416  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14417 
14418  VmaSuballocation suballoc = *srcSuballocIt;
14419  suballoc.offset = dstAllocOffset;
14420  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14421  m_BytesMoved += srcAllocSize;
14422  ++m_AllocationsMoved;
14423 
14424  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14425  ++nextSuballocIt;
14426  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14427  srcSuballocIt = nextSuballocIt;
14428 
14429  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14430 
14431  move.srcBlockIndex = srcOrigBlockIndex;
14432  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14433  move.srcOffset = srcAllocOffset;
14434  move.dstOffset = dstAllocOffset;
14435  move.size = srcAllocSize;
14436 
14437  moves.push_back(move);
14438  }
14439  }
14440  else
14441  {
14442  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14443 
14444  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
14445  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14446  dstAllocOffset + srcAllocSize > dstBlockSize)
14447  {
14448  // But before that, register remaining free space at the end of dst block.
14449  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14450 
14451  ++dstBlockInfoIndex;
14452  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14453  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14454  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14455  dstBlockSize = pDstMetadata->GetSize();
14456  dstOffset = 0;
14457  dstAllocOffset = 0;
14458  }
14459 
14460  // Same block
14461  if(dstBlockInfoIndex == srcBlockInfoIndex)
14462  {
14463  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14464 
14465  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14466 
14467  bool skipOver = overlap;
14468  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14469  {
14470  // If the destination and source places overlap, skip the move if it would
14471  // shift the allocation by less than 1/64 of its size.
14472  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14473  }
14474 
14475  if(skipOver)
14476  {
14477  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14478 
14479  dstOffset = srcAllocOffset + srcAllocSize;
14480  ++srcSuballocIt;
14481  }
14482  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14483  else
14484  {
14485  srcSuballocIt->offset = dstAllocOffset;
14486  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14487  dstOffset = dstAllocOffset + srcAllocSize;
14488  m_BytesMoved += srcAllocSize;
14489  ++m_AllocationsMoved;
14490  ++srcSuballocIt;
14491 
14492  move.srcBlockIndex = srcOrigBlockIndex;
14493  move.dstBlockIndex = dstOrigBlockIndex;
14494  move.srcOffset = srcAllocOffset;
14495  move.dstOffset = dstAllocOffset;
14496  move.size = srcAllocSize;
14497 
14498  moves.push_back(move);
14499  }
14500  }
14501  // Different block
14502  else
14503  {
14504  // MOVE OPTION 2: Move the allocation to a different block.
14505 
14506  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14507  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14508 
14509  VmaSuballocation suballoc = *srcSuballocIt;
14510  suballoc.offset = dstAllocOffset;
14511  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14512  dstOffset = dstAllocOffset + srcAllocSize;
14513  m_BytesMoved += srcAllocSize;
14514  ++m_AllocationsMoved;
14515 
14516  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14517  ++nextSuballocIt;
14518  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14519  srcSuballocIt = nextSuballocIt;
14520 
14521  pDstMetadata->m_Suballocations.push_back(suballoc);
14522 
14523  move.srcBlockIndex = srcOrigBlockIndex;
14524  move.dstBlockIndex = dstOrigBlockIndex;
14525  move.srcOffset = srcAllocOffset;
14526  move.dstOffset = dstAllocOffset;
14527  move.size = srcAllocSize;
14528 
14529  moves.push_back(move);
14530  }
14531  }
14532  }
14533  }
14534 
14535  m_BlockInfos.clear();
14536 
14537  PostprocessMetadata();
14538 
14539  return VK_SUCCESS;
14540 }
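/*
Illustrative sketch of the single forward sweep above (hypothetical layout of
one block, '.' = free space):

    before:  [ A . . B . C . . ]     dstOffset advances left to right;
    after:   [ A B C . . . . . ]     each allocation slides down to dstOffset.

Allocations whose move would shift them by less than 1/64 of their size are
skipped, and the hole in front of them is registered in freeSpaceDb so that
later, smaller allocations can still fill it.
*/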
14541 
14542 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14543 {
14544  const size_t blockCount = m_pBlockVector->GetBlockCount();
14545  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14546  {
14547  VmaBlockMetadata_Generic* const pMetadata =
14548  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14549  pMetadata->m_FreeCount = 0;
14550  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14551  pMetadata->m_FreeSuballocationsBySize.clear();
14552  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14553  it != pMetadata->m_Suballocations.end(); )
14554  {
14555  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14556  {
14557  VmaSuballocationList::iterator nextIt = it;
14558  ++nextIt;
14559  pMetadata->m_Suballocations.erase(it);
14560  it = nextIt;
14561  }
14562  else
14563  {
14564  ++it;
14565  }
14566  }
14567  }
14568 }
14569 
14570 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14571 {
14572  const size_t blockCount = m_pBlockVector->GetBlockCount();
14573  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14574  {
14575  VmaBlockMetadata_Generic* const pMetadata =
14576  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14577  const VkDeviceSize blockSize = pMetadata->GetSize();
14578 
14579  // No allocations in this block - entire area is free.
14580  if(pMetadata->m_Suballocations.empty())
14581  {
14582  pMetadata->m_FreeCount = 1;
14583  //pMetadata->m_SumFreeSize is already set to blockSize.
14584  VmaSuballocation suballoc = {
14585  0, // offset
14586  blockSize, // size
14587  VMA_NULL, // hAllocation
14588  VMA_SUBALLOCATION_TYPE_FREE };
14589  pMetadata->m_Suballocations.push_back(suballoc);
14590  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14591  }
14592  // There are some allocations in this block.
14593  else
14594  {
14595  VkDeviceSize offset = 0;
14596  VmaSuballocationList::iterator it;
14597  for(it = pMetadata->m_Suballocations.begin();
14598  it != pMetadata->m_Suballocations.end();
14599  ++it)
14600  {
14601  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14602  VMA_ASSERT(it->offset >= offset);
14603 
14604  // Need to insert preceding free space.
14605  if(it->offset > offset)
14606  {
14607  ++pMetadata->m_FreeCount;
14608  const VkDeviceSize freeSize = it->offset - offset;
14609  VmaSuballocation suballoc = {
14610  offset, // offset
14611  freeSize, // size
14612  VMA_NULL, // hAllocation
14613  VMA_SUBALLOCATION_TYPE_FREE };
14614  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14615  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14616  {
14617  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14618  }
14619  }
14620 
14621  pMetadata->m_SumFreeSize -= it->size;
14622  offset = it->offset + it->size;
14623  }
14624 
14625  // Need to insert trailing free space.
14626  if(offset < blockSize)
14627  {
14628  ++pMetadata->m_FreeCount;
14629  const VkDeviceSize freeSize = blockSize - offset;
14630  VmaSuballocation suballoc = {
14631  offset, // offset
14632  freeSize, // size
14633  VMA_NULL, // hAllocation
14634  VMA_SUBALLOCATION_TYPE_FREE };
14635  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14636  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14637  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14638  {
14639  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14640  }
14641  }
14642 
14643  VMA_SORT(
14644  pMetadata->m_FreeSuballocationsBySize.begin(),
14645  pMetadata->m_FreeSuballocationsBySize.end(),
14646  VmaSuballocationItemSizeLess());
14647  }
14648 
14649  VMA_HEAVY_ASSERT(pMetadata->Validate());
14650  }
14651 }
14652 
14653 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14654 {
14655  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14656  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14657  // Advance to the first suballocation at or beyond suballoc.offset,
14658  // so the list stays sorted by offset.
14659  while(it != pMetadata->m_Suballocations.end() &&
14660  it->offset < suballoc.offset)
14661  {
14662  ++it;
14663  }
14664  pMetadata->m_Suballocations.insert(it, suballoc);
14665 }
14666 
14667 ////////////////////////////////////////////////////////////////////////////////
14668 // VmaBlockVectorDefragmentationContext
14669 
14670 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14671  VmaAllocator hAllocator,
14672  VmaPool hCustomPool,
14673  VmaBlockVector* pBlockVector,
14674  uint32_t currFrameIndex) :
14675  res(VK_SUCCESS),
14676  mutexLocked(false),
14677  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14678  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14679  defragmentationMovesProcessed(0),
14680  defragmentationMovesCommitted(0),
14681  hasDefragmentationPlan(0),
14682  m_hAllocator(hAllocator),
14683  m_hCustomPool(hCustomPool),
14684  m_pBlockVector(pBlockVector),
14685  m_CurrFrameIndex(currFrameIndex),
14686  m_pAlgorithm(VMA_NULL),
14687  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14688  m_AllAllocations(false)
14689 {
14690 }
14691 
14692 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14693 {
14694  vma_delete(m_hAllocator, m_pAlgorithm);
14695 }
14696 
14697 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14698 {
14699  AllocInfo info = { hAlloc, pChanged };
14700  m_Allocations.push_back(info);
14701 }
14702 
14703 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14704 {
14705  const bool allAllocations = m_AllAllocations ||
14706  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14707 
14708  /********************************
14709  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14710  ********************************/
14711 
14712  /*
14713  Fast algorithm is supported only when certain criteria are met:
14714  - VMA_DEBUG_MARGIN is 0.
14715  - All allocations in this block vector are moveable.
14716  - There is no possibility of image/buffer granularity conflict.
14717  - The defragmentation is not incremental.
14718  */
14719  if(VMA_DEBUG_MARGIN == 0 &&
14720  allAllocations &&
14721  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14722  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
14723  {
14724  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14725  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14726  }
14727  else
14728  {
14729  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14730  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14731  }
14732 
14733  if(allAllocations)
14734  {
14735  m_pAlgorithm->AddAll();
14736  }
14737  else
14738  {
14739  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14740  {
14741  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14742  }
14743  }
14744 }
14745 
14746 ////////////////////////////////////////////////////////////////////////////////
14747 // VmaDefragmentationContext
14748 
14749 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14750  VmaAllocator hAllocator,
14751  uint32_t currFrameIndex,
14752  uint32_t flags,
14753  VmaDefragmentationStats* pStats) :
14754  m_hAllocator(hAllocator),
14755  m_CurrFrameIndex(currFrameIndex),
14756  m_Flags(flags),
14757  m_pStats(pStats),
14758  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14759 {
14760  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14761 }
14762 
14763 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14764 {
14765  for(size_t i = m_CustomPoolContexts.size(); i--; )
14766  {
14767  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14768  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14769  vma_delete(m_hAllocator, pBlockVectorCtx);
14770  }
14771  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14772  {
14773  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14774  if(pBlockVectorCtx)
14775  {
14776  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14777  vma_delete(m_hAllocator, pBlockVectorCtx);
14778  }
14779  }
14780 }
14781 
14782 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
14783 {
14784  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14785  {
14786  VmaPool pool = pPools[poolIndex];
14787  VMA_ASSERT(pool);
14788  // Pools with algorithm other than default are not defragmented.
14789  if(pool->m_BlockVector.GetAlgorithm() == 0)
14790  {
14791  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14792 
14793  for(size_t i = m_CustomPoolContexts.size(); i--; )
14794  {
14795  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14796  {
14797  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14798  break;
14799  }
14800  }
14801 
14802  if(!pBlockVectorDefragCtx)
14803  {
14804  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14805  m_hAllocator,
14806  pool,
14807  &pool->m_BlockVector,
14808  m_CurrFrameIndex);
14809  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14810  }
14811 
14812  pBlockVectorDefragCtx->AddAll();
14813  }
14814  }
14815 }
14816 
14817 void VmaDefragmentationContext_T::AddAllocations(
14818  uint32_t allocationCount,
14819  const VmaAllocation* pAllocations,
14820  VkBool32* pAllocationsChanged)
14821 {
14822  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
14823  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14824  {
14825  const VmaAllocation hAlloc = pAllocations[allocIndex];
14826  VMA_ASSERT(hAlloc);
14827  // DedicatedAlloc cannot be defragmented.
14828  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14829  // Lost allocation cannot be defragmented.
14830  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14831  {
14832  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14833 
14834  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14835  // This allocation belongs to a custom pool.
14836  if(hAllocPool != VK_NULL_HANDLE)
14837  {
14838  // Pools with algorithm other than default are not defragmented.
14839  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14840  {
14841  for(size_t i = m_CustomPoolContexts.size(); i--; )
14842  {
14843  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14844  {
14845  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14846  break;
14847  }
14848  }
14849  if(!pBlockVectorDefragCtx)
14850  {
14851  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14852  m_hAllocator,
14853  hAllocPool,
14854  &hAllocPool->m_BlockVector,
14855  m_CurrFrameIndex);
14856  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14857  }
14858  }
14859  }
14860  // This allocation belongs to the default pool.
14861  else
14862  {
14863  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14864  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14865  if(!pBlockVectorDefragCtx)
14866  {
14867  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14868  m_hAllocator,
14869  VMA_NULL, // hCustomPool
14870  m_hAllocator->m_pBlockVectors[memTypeIndex],
14871  m_CurrFrameIndex);
14872  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14873  }
14874  }
14875 
14876  if(pBlockVectorDefragCtx)
14877  {
14878  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14879  &pAllocationsChanged[allocIndex] : VMA_NULL;
14880  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14881  }
14882  }
14883  }
14884 }
14885 
14886 VkResult VmaDefragmentationContext_T::Defragment(
14887  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14888  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14889  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14890 {
14891  if(pStats)
14892  {
14893  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14894  }
14895 
14896  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14897  {
14898  // For incremental defragmentations, we just earmark how much we can move.
14899  // The real work happens in the individual defragmentation passes.
14900  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14901  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14902 
14903  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14904  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14905 
14906  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14907  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14908  return VK_SUCCESS;
14909 
14910  return VK_NOT_READY;
14911  }
14912 
14913  if(commandBuffer == VK_NULL_HANDLE)
14914  {
14915  maxGpuBytesToMove = 0;
14916  maxGpuAllocationsToMove = 0;
14917  }
14918 
14919  VkResult res = VK_SUCCESS;
14920 
14921  // Process default pools.
14922  for(uint32_t memTypeIndex = 0;
14923  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14924  ++memTypeIndex)
14925  {
14926  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14927  if(pBlockVectorCtx)
14928  {
14929  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14930  pBlockVectorCtx->GetBlockVector()->Defragment(
14931  pBlockVectorCtx,
14932  pStats, flags,
14933  maxCpuBytesToMove, maxCpuAllocationsToMove,
14934  maxGpuBytesToMove, maxGpuAllocationsToMove,
14935  commandBuffer);
14936  if(pBlockVectorCtx->res != VK_SUCCESS)
14937  {
14938  res = pBlockVectorCtx->res;
14939  }
14940  }
14941  }
14942 
14943  // Process custom pools.
14944  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14945  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14946  ++customCtxIndex)
14947  {
14948  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14949  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14950  pBlockVectorCtx->GetBlockVector()->Defragment(
14951  pBlockVectorCtx,
14952  pStats, flags,
14953  maxCpuBytesToMove, maxCpuAllocationsToMove,
14954  maxGpuBytesToMove, maxGpuAllocationsToMove,
14955  commandBuffer);
14956  if(pBlockVectorCtx->res != VK_SUCCESS)
14957  {
14958  res = pBlockVectorCtx->res;
14959  }
14960  }
14961 
14962  return res;
14963 }
14964 
14965 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14966 {
14967  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14968  uint32_t movesLeft = pInfo->moveCount;
14969 
14970  // Process default pools.
14971  for(uint32_t memTypeIndex = 0;
14972  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14973  ++memTypeIndex)
14974  {
14975  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14976  if(pBlockVectorCtx)
14977  {
14978  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14979 
14980  if(!pBlockVectorCtx->hasDefragmentationPlan)
14981  {
14982  pBlockVectorCtx->GetBlockVector()->Defragment(
14983  pBlockVectorCtx,
14984  m_pStats, m_Flags,
14985  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14986  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14987  VK_NULL_HANDLE);
14988 
14989  if(pBlockVectorCtx->res < VK_SUCCESS)
14990  continue;
14991 
14992  pBlockVectorCtx->hasDefragmentationPlan = true;
14993  }
14994 
14995  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14996  pBlockVectorCtx,
14997  pCurrentMove, movesLeft);
14998 
14999  movesLeft -= processed;
15000  pCurrentMove += processed;
15001  }
15002  }
15003 
15004  // Process custom pools.
15005  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15006  customCtxIndex < customCtxCount;
15007  ++customCtxIndex)
15008  {
15009  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15010  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15011 
15012  if(!pBlockVectorCtx->hasDefragmentationPlan)
15013  {
15014  pBlockVectorCtx->GetBlockVector()->Defragment(
15015  pBlockVectorCtx,
15016  m_pStats, m_Flags,
15017  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15018  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15019  VK_NULL_HANDLE);
15020 
15021  if(pBlockVectorCtx->res < VK_SUCCESS)
15022  continue;
15023 
15024  pBlockVectorCtx->hasDefragmentationPlan = true;
15025  }
15026 
15027  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15028  pBlockVectorCtx,
15029  pCurrentMove, movesLeft);
15030 
15031  movesLeft -= processed;
15032  pCurrentMove += processed;
15033  }
15034 
15035  pInfo->moveCount = pInfo->moveCount - movesLeft;
15036 
15037  return VK_SUCCESS;
15038 }
15039 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
15040 {
15041  VkResult res = VK_SUCCESS;
15042 
15043  // Process default pools.
15044  for(uint32_t memTypeIndex = 0;
15045  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15046  ++memTypeIndex)
15047  {
15048  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15049  if(pBlockVectorCtx)
15050  {
15051  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15052 
15053  if(!pBlockVectorCtx->hasDefragmentationPlan)
15054  {
15055  res = VK_NOT_READY;
15056  continue;
15057  }
15058 
15059  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15060  pBlockVectorCtx, m_pStats);
15061 
15062  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15063  res = VK_NOT_READY;
15064  }
15065  }
15066 
15067  // Process custom pools.
15068  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15069  customCtxIndex < customCtxCount;
15070  ++customCtxIndex)
15071  {
15072  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15073  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15074 
15075  if(!pBlockVectorCtx->hasDefragmentationPlan)
15076  {
15077  res = VK_NOT_READY;
15078  continue;
15079  }
15080 
15081  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15082  pBlockVectorCtx, m_pStats);
15083 
15084  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15085  res = VK_NOT_READY;
15086  }
15087 
15088  return res;
15089 }
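/*
Illustrative sketch (not library code): a typical incremental defragmentation
loop driving DefragmentPassBegin()/DefragmentPassEnd() above through the
public API. `allocator` and `ctx` are assumptions for the example.
*/
#if 0
static void ExampleIncrementalDefragmentation(
    VmaAllocator allocator,
    VmaDefragmentationContext ctx)
{
    enum { MAX_MOVES = 64 };
    VmaDefragmentationPassMoveInfo moves[MAX_MOVES];

    VkResult res;
    do
    {
        VmaDefragmentationPassInfo passInfo = {};
        passInfo.moveCount = MAX_MOVES;
        passInfo.pMoves = moves;

        vmaBeginDefragmentationPass(allocator, ctx, &passInfo);

        // Copy each reported allocation's data to passInfo.pMoves[i].memory
        // at the given offset here, then commit the moves:
        res = vmaEndDefragmentationPass(allocator, ctx);
    }
    while(res == VK_NOT_READY); // VK_NOT_READY: more moves remain.
}
#endif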
15090 
15091 ////////////////////////////////////////////////////////////////////////////////
15092 // VmaRecorder
15093 
15094 #if VMA_RECORDING_ENABLED
15095 
15096 VmaRecorder::VmaRecorder() :
15097  m_UseMutex(true),
15098  m_Flags(0),
15099  m_File(VMA_NULL),
15100  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
15101 {
15102 }
15103 
15104 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
15105 {
15106  m_UseMutex = useMutex;
15107  m_Flags = settings.flags;
15108 
15109 #if defined(_WIN32)
15110  // Open file for writing.
15111  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
15112 
15113  if(err != 0)
15114  {
15115  return VK_ERROR_INITIALIZATION_FAILED;
15116  }
15117 #else
15118  // Open file for writing.
15119  m_File = fopen(settings.pFilePath, "wb");
15120 
15121  if(m_File == 0)
15122  {
15123  return VK_ERROR_INITIALIZATION_FAILED;
15124  }
15125 #endif
15126 
15127  // Write header.
15128  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
15129  fprintf(m_File, "%s\n", "1,8");
15130 
15131  return VK_SUCCESS;
15132 }
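/*
The recording is a plain CSV text file. After the two header lines written
above, each recorded call is appended as one line of the form
threadId,time,frameIndex,functionName followed by call-specific parameters
(see the Record* functions below). A hypothetical opening fragment:

    Vulkan Memory Allocator,Calls recording
    1,8
    12552,0.002,0,vmaCreateAllocator
    12552,0.125,1,vmaCreateBuffer,0,65536,129,0,...
*/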
15133 
15134 VmaRecorder::~VmaRecorder()
15135 {
15136  if(m_File != VMA_NULL)
15137  {
15138  fclose(m_File);
15139  }
15140 }
15141 
15142 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
15143 {
15144  CallParams callParams;
15145  GetBasicParams(callParams);
15146 
15147  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15148  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
15149  Flush();
15150 }
15151 
15152 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
15153 {
15154  CallParams callParams;
15155  GetBasicParams(callParams);
15156 
15157  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15158  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
15159  Flush();
15160 }
15161 
15162 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
15163 {
15164  CallParams callParams;
15165  GetBasicParams(callParams);
15166 
15167  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15168  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
15169  createInfo.memoryTypeIndex,
15170  createInfo.flags,
15171  createInfo.blockSize,
15172  (uint64_t)createInfo.minBlockCount,
15173  (uint64_t)createInfo.maxBlockCount,
15174  createInfo.frameInUseCount,
15175  pool);
15176  Flush();
15177 }
15178 
15179 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
15180 {
15181  CallParams callParams;
15182  GetBasicParams(callParams);
15183 
15184  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15185  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
15186  pool);
15187  Flush();
15188 }
15189 
15190 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
15191  const VkMemoryRequirements& vkMemReq,
15192  const VmaAllocationCreateInfo& createInfo,
15193  VmaAllocation allocation)
15194 {
15195  CallParams callParams;
15196  GetBasicParams(callParams);
15197 
15198  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15199  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15200  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15201  vkMemReq.size,
15202  vkMemReq.alignment,
15203  vkMemReq.memoryTypeBits,
15204  createInfo.flags,
15205  createInfo.usage,
15206  createInfo.requiredFlags,
15207  createInfo.preferredFlags,
15208  createInfo.memoryTypeBits,
15209  createInfo.pool,
15210  allocation,
15211  userDataStr.GetString());
15212  Flush();
15213 }
15214 
15215 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
15216  const VkMemoryRequirements& vkMemReq,
15217  const VmaAllocationCreateInfo& createInfo,
15218  uint64_t allocationCount,
15219  const VmaAllocation* pAllocations)
15220 {
15221  CallParams callParams;
15222  GetBasicParams(callParams);
15223 
15224  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15225  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15226  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
15227  vkMemReq.size,
15228  vkMemReq.alignment,
15229  vkMemReq.memoryTypeBits,
15230  createInfo.flags,
15231  createInfo.usage,
15232  createInfo.requiredFlags,
15233  createInfo.preferredFlags,
15234  createInfo.memoryTypeBits,
15235  createInfo.pool);
15236  PrintPointerList(allocationCount, pAllocations);
15237  fprintf(m_File, ",%s\n", userDataStr.GetString());
15238  Flush();
15239 }
15240 
15241 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
15242  const VkMemoryRequirements& vkMemReq,
15243  bool requiresDedicatedAllocation,
15244  bool prefersDedicatedAllocation,
15245  const VmaAllocationCreateInfo& createInfo,
15246  VmaAllocation allocation)
15247 {
15248  CallParams callParams;
15249  GetBasicParams(callParams);
15250 
15251  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15252  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15253  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15254  vkMemReq.size,
15255  vkMemReq.alignment,
15256  vkMemReq.memoryTypeBits,
15257  requiresDedicatedAllocation ? 1 : 0,
15258  prefersDedicatedAllocation ? 1 : 0,
15259  createInfo.flags,
15260  createInfo.usage,
15261  createInfo.requiredFlags,
15262  createInfo.preferredFlags,
15263  createInfo.memoryTypeBits,
15264  createInfo.pool,
15265  allocation,
15266  userDataStr.GetString());
15267  Flush();
15268 }
15269 
15270 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15271  const VkMemoryRequirements& vkMemReq,
15272  bool requiresDedicatedAllocation,
15273  bool prefersDedicatedAllocation,
15274  const VmaAllocationCreateInfo& createInfo,
15275  VmaAllocation allocation)
15276 {
15277  CallParams callParams;
15278  GetBasicParams(callParams);
15279 
15280  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15281  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15282  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15283  vkMemReq.size,
15284  vkMemReq.alignment,
15285  vkMemReq.memoryTypeBits,
15286  requiresDedicatedAllocation ? 1 : 0,
15287  prefersDedicatedAllocation ? 1 : 0,
15288  createInfo.flags,
15289  createInfo.usage,
15290  createInfo.requiredFlags,
15291  createInfo.preferredFlags,
15292  createInfo.memoryTypeBits,
15293  createInfo.pool,
15294  allocation,
15295  userDataStr.GetString());
15296  Flush();
15297 }
15298 
15299 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15300  VmaAllocation allocation)
15301 {
15302  CallParams callParams;
15303  GetBasicParams(callParams);
15304 
15305  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15306  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15307  allocation);
15308  Flush();
15309 }
15310 
15311 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15312  uint64_t allocationCount,
15313  const VmaAllocation* pAllocations)
15314 {
15315  CallParams callParams;
15316  GetBasicParams(callParams);
15317 
15318  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15319  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15320  PrintPointerList(allocationCount, pAllocations);
15321  fprintf(m_File, "\n");
15322  Flush();
15323 }
15324 
15325 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15326  VmaAllocation allocation,
15327  const void* pUserData)
15328 {
15329  CallParams callParams;
15330  GetBasicParams(callParams);
15331 
15332  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15333  UserDataString userDataStr(
15334  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15335  pUserData);
15336  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15337  allocation,
15338  userDataStr.GetString());
15339  Flush();
15340 }
15341 
15342 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15343  VmaAllocation allocation)
15344 {
15345  CallParams callParams;
15346  GetBasicParams(callParams);
15347 
15348  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15349  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15350  allocation);
15351  Flush();
15352 }
15353 
15354 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15355  VmaAllocation allocation)
15356 {
15357  CallParams callParams;
15358  GetBasicParams(callParams);
15359 
15360  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15361  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15362  allocation);
15363  Flush();
15364 }
15365 
15366 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15367  VmaAllocation allocation)
15368 {
15369  CallParams callParams;
15370  GetBasicParams(callParams);
15371 
15372  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15373  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15374  allocation);
15375  Flush();
15376 }
15377 
15378 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15379  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15380 {
15381  CallParams callParams;
15382  GetBasicParams(callParams);
15383 
15384  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15385  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15386  allocation,
15387  offset,
15388  size);
15389  Flush();
15390 }
15391 
15392 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15393  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15394 {
15395  CallParams callParams;
15396  GetBasicParams(callParams);
15397 
15398  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15399  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15400  allocation,
15401  offset,
15402  size);
15403  Flush();
15404 }
15405 
15406 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15407  const VkBufferCreateInfo& bufCreateInfo,
15408  const VmaAllocationCreateInfo& allocCreateInfo,
15409  VmaAllocation allocation)
15410 {
15411  CallParams callParams;
15412  GetBasicParams(callParams);
15413 
15414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15415  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15416  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15417  bufCreateInfo.flags,
15418  bufCreateInfo.size,
15419  bufCreateInfo.usage,
15420  bufCreateInfo.sharingMode,
15421  allocCreateInfo.flags,
15422  allocCreateInfo.usage,
15423  allocCreateInfo.requiredFlags,
15424  allocCreateInfo.preferredFlags,
15425  allocCreateInfo.memoryTypeBits,
15426  allocCreateInfo.pool,
15427  allocation,
15428  userDataStr.GetString());
15429  Flush();
15430 }
15431 
15432 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15433  const VkImageCreateInfo& imageCreateInfo,
15434  const VmaAllocationCreateInfo& allocCreateInfo,
15435  VmaAllocation allocation)
15436 {
15437  CallParams callParams;
15438  GetBasicParams(callParams);
15439 
15440  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15441  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15442  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15443  imageCreateInfo.flags,
15444  imageCreateInfo.imageType,
15445  imageCreateInfo.format,
15446  imageCreateInfo.extent.width,
15447  imageCreateInfo.extent.height,
15448  imageCreateInfo.extent.depth,
15449  imageCreateInfo.mipLevels,
15450  imageCreateInfo.arrayLayers,
15451  imageCreateInfo.samples,
15452  imageCreateInfo.tiling,
15453  imageCreateInfo.usage,
15454  imageCreateInfo.sharingMode,
15455  imageCreateInfo.initialLayout,
15456  allocCreateInfo.flags,
15457  allocCreateInfo.usage,
15458  allocCreateInfo.requiredFlags,
15459  allocCreateInfo.preferredFlags,
15460  allocCreateInfo.memoryTypeBits,
15461  allocCreateInfo.pool,
15462  allocation,
15463  userDataStr.GetString());
15464  Flush();
15465 }
15466 
15467 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15468  VmaAllocation allocation)
15469 {
15470  CallParams callParams;
15471  GetBasicParams(callParams);
15472 
15473  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15474  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15475  allocation);
15476  Flush();
15477 }
15478 
15479 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15480  VmaAllocation allocation)
15481 {
15482  CallParams callParams;
15483  GetBasicParams(callParams);
15484 
15485  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15486  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15487  allocation);
15488  Flush();
15489 }
15490 
15491 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15492  VmaAllocation allocation)
15493 {
15494  CallParams callParams;
15495  GetBasicParams(callParams);
15496 
15497  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15498  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15499  allocation);
15500  Flush();
15501 }
15502 
15503 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15504  VmaAllocation allocation)
15505 {
15506  CallParams callParams;
15507  GetBasicParams(callParams);
15508 
15509  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15510  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15511  allocation);
15512  Flush();
15513 }
15514 
15515 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15516  VmaPool pool)
15517 {
15518  CallParams callParams;
15519  GetBasicParams(callParams);
15520 
15521  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15522  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15523  pool);
15524  Flush();
15525 }
15526 
15527 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15528  const VmaDefragmentationInfo2& info,
15529  VmaDefragmentationContext ctx)
15530 {
15531  CallParams callParams;
15532  GetBasicParams(callParams);
15533 
15534  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15535  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15536  info.flags);
15537  PrintPointerList(info.allocationCount, info.pAllocations);
15538  fprintf(m_File, ",");
15539  PrintPointerList(info.poolCount, info.pPools);
15540  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15541  info.maxCpuBytesToMove,
15542  info.maxCpuAllocationsToMove,
15543  info.maxGpuBytesToMove,
15544  info.maxGpuAllocationsToMove,
15545  info.commandBuffer,
15546  ctx);
15547  Flush();
15548 }
15549 
15550 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15551  VmaDefragmentationContext ctx)
15552 {
15553  CallParams callParams;
15554  GetBasicParams(callParams);
15555 
15556  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15557  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15558  ctx);
15559  Flush();
15560 }
15561 
15562 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15563  VmaPool pool,
15564  const char* name)
15565 {
15566  CallParams callParams;
15567  GetBasicParams(callParams);
15568 
15569  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15570  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15571  pool, name != VMA_NULL ? name : "");
15572  Flush();
15573 }
15574 
15575 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15576 {
15577  if(pUserData != VMA_NULL)
15578  {
15579  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15580  {
15581  m_Str = (const char*)pUserData;
15582  }
15583  else
15584  {
15585  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, pUserData is an opaque pointer: format its address as a string and store that.
15586  snprintf(m_PtrStr, 17, "%p", pUserData);
15587  m_Str = m_PtrStr;
15588  }
15589  }
15590  else
15591  {
15592  m_Str = "";
15593  }
15594 }
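/*
Illustration (application-side sketch, not part of the library): how the two
pUserData modes handled above look from the API. With the COPY_STRING bit set,
pUserData is treated as a null-terminated string; otherwise only the pointer
value itself is formatted.

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
    allocCreateInfo.pUserData = (void*)"MyBuffer"; // recorded as the string "MyBuffer"

    // Without the flag, an arbitrary pointer is recorded as its hex address:
    // allocCreateInfo.flags = 0;
    // allocCreateInfo.pUserData = pMyObject; // hypothetical object pointer
*/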
15595 
15596 void VmaRecorder::WriteConfiguration(
15597  const VkPhysicalDeviceProperties& devProps,
15598  const VkPhysicalDeviceMemoryProperties& memProps,
15599  uint32_t vulkanApiVersion,
15600  bool dedicatedAllocationExtensionEnabled,
15601  bool bindMemory2ExtensionEnabled,
15602  bool memoryBudgetExtensionEnabled,
15603  bool deviceCoherentMemoryExtensionEnabled)
15604 {
15605  fprintf(m_File, "Config,Begin\n");
15606 
15607  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15608 
15609  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15610  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15611  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15612  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15613  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15614  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15615 
15616  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15617  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15618  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15619 
15620  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15621  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15622  {
15623  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15624  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15625  }
15626  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15627  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15628  {
15629  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15630  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15631  }
15632 
15633  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15634  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15635  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15636  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15637 
15638  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15639  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
15640  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15641  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15642  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15643  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15644  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15645  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15646  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15647 
15648  fprintf(m_File, "Config,End\n");
15649 }
15650 
15651 void VmaRecorder::GetBasicParams(CallParams& outParams)
15652 {
15653  #if defined(_WIN32)
15654  outParams.threadId = GetCurrentThreadId();
15655  #else
15656  // Use C++11 features to get the thread id and convert it to uint32_t.
15657  // There is room for optimization here, since std::stringstream is quite slow.
15658  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
15659  std::thread::id thread_id = std::this_thread::get_id();
15660  std::stringstream thread_id_to_string_converter;
15661  thread_id_to_string_converter << thread_id;
15662  std::string thread_id_as_string = thread_id_to_string_converter.str();
15663  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
15664  #endif
15665 
15666  auto current_time = std::chrono::high_resolution_clock::now();
15667 
15668  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
15669 }
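/*
One possible answer to the question above (a sketch, not used by the library):
hash the id instead of round-tripping it through a stringstream. The result is
a hash rather than the OS thread id, which is acceptable here because the value
only needs to be stable within one recording session.

    #include <functional> // for std::hash
    outParams.threadId = static_cast<uint32_t>(
        std::hash<std::thread::id>{}(std::this_thread::get_id()));
*/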
15670 
15671 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15672 {
15673  if(count)
15674  {
15675  fprintf(m_File, "%p", pItems[0]);
15676  for(uint64_t i = 1; i < count; ++i)
15677  {
15678  fprintf(m_File, " %p", pItems[i]);
15679  }
15680  }
15681 }
15682 
15683 void VmaRecorder::Flush()
15684 {
15685  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15686  {
15687  fflush(m_File);
15688  }
15689 }
15690 
15691 #endif // #if VMA_RECORDING_ENABLED
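/*
Application-side usage sketch for the recording functionality implemented above
(assumes the library was compiled with VMA_RECORDING_ENABLED defined to 1; the
file path is hypothetical):

    VmaRecordSettings recordSettings = {};
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // enables Flush() above
    recordSettings.pFilePath = "vma_calls.csv";

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... fill instance, physicalDevice, device ...
    allocatorInfo.pRecordSettings = &recordSettings;
*/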
15692 
15693 ////////////////////////////////////////////////////////////////////////////////
15694 // VmaAllocationObjectAllocator
15695 
15696 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
15697  m_Allocator(pAllocationCallbacks, 1024)
15698 {
15699 }
15700 
15701 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
15702 {
15703  VmaMutexLock mutexLock(m_Mutex);
15704  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
15705 }
15706 
15707 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15708 {
15709  VmaMutexLock mutexLock(m_Mutex);
15710  m_Allocator.Free(hAlloc);
15711 }
15712 
15713 ////////////////////////////////////////////////////////////////////////////////
15714 // VmaAllocator_T
15715 
15716 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15717  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15718  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15719  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15720  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15721  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15722  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15723  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15724  m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
15725  m_hDevice(pCreateInfo->device),
15726  m_hInstance(pCreateInfo->instance),
15727  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15728  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15729  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15730  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15731  m_HeapSizeLimitMask(0),
15732  m_PreferredLargeHeapBlockSize(0),
15733  m_PhysicalDevice(pCreateInfo->physicalDevice),
15734  m_CurrentFrameIndex(0),
15735  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15736  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15737  m_NextPoolId(0),
15738  m_GlobalMemoryTypeBits(UINT32_MAX)
15739 #if VMA_RECORDING_ENABLED
15740  ,m_pRecorder(VMA_NULL)
15741 #endif
15742 {
15743  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15744  {
15745  m_UseKhrDedicatedAllocation = false;
15746  m_UseKhrBindMemory2 = false;
15747  }
15748 
15749  if(VMA_DEBUG_DETECT_CORRUPTION)
15750  {
15751  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15752  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15753  }
15754 
15755  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
15756 
15757  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15758  {
15759 #if !(VMA_DEDICATED_ALLOCATION)
15760  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15761  {
15762  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15763  }
15764 #endif
15765 #if !(VMA_BIND_MEMORY2)
15766  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15767  {
15768  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15769  }
15770 #endif
15771  }
15772 #if !(VMA_MEMORY_BUDGET)
15773  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15774  {
15775  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15776  }
15777 #endif
15778 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15779  if(m_UseKhrBufferDeviceAddress)
15780  {
15781  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15782  }
15783 #endif
15784 #if VMA_VULKAN_VERSION < 1002000
15785  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15786  {
15787  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15788  }
15789 #endif
15790 #if VMA_VULKAN_VERSION < 1001000
15791  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15792  {
15793  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15794  }
15795 #endif
15796 #if !(VMA_MEMORY_PRIORITY)
15797  if(m_UseExtMemoryPriority)
15798  {
15799  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15800  }
15801 #endif
15802 
15803  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
15804  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15805  memset(&m_MemProps, 0, sizeof(m_MemProps));
15806 
15807  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15808  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15809  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15810 
15811  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15812  {
15813  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
15814  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15815  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15816  }
15817 
15818  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15819 
15820  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15821  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15822 
15823  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15824  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15825  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15826  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15827 
15828  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15829  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15830 
15831  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15832 
15833  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15834  {
15835  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15836  {
15837  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15838  if(limit != VK_WHOLE_SIZE)
15839  {
15840  m_HeapSizeLimitMask |= 1u << heapIndex;
15841  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15842  {
15843  m_MemProps.memoryHeaps[heapIndex].size = limit;
15844  }
15845  }
15846  }
15847  }
15848 
15849  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15850  {
15851  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15852 
15853  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15854  this,
15855  VK_NULL_HANDLE, // hParentPool
15856  memTypeIndex,
15857  preferredBlockSize,
15858  0,
15859  SIZE_MAX,
15860  GetBufferImageGranularity(),
15861  pCreateInfo->frameInUseCount,
15862  false, // explicitBlockSize
15863  false, // linearAlgorithm
15864  0.5f); // priority (0.5 is the default per Vulkan spec)
15865  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15866  // because minBlockCount is 0.
15867  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15868 
15869  }
15870 }
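/*
The pHeapSizeLimit handling in the constructor above can be exercised like this
(application-side sketch; capping heap 0 is an assumption, a real application
would pick the heap index based on queried memory properties):

    VkDeviceSize heapLimits[VK_MAX_MEMORY_HEAPS];
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapLimits[i] = VK_WHOLE_SIZE;    // VK_WHOLE_SIZE means no limit
    heapLimits[0] = 512ull * 1024 * 1024; // cap heap 0 at 512 MiB

    VmaAllocatorCreateInfo allocatorInfo = {};
    // ... fill instance, physicalDevice, device ...
    allocatorInfo.pHeapSizeLimit = heapLimits;
*/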
15871 
15872 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15873 {
15874  VkResult res = VK_SUCCESS;
15875 
15876  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15877  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15878  {
15879 #if VMA_RECORDING_ENABLED
15880  m_pRecorder = vma_new(this, VmaRecorder)();
15881  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15882  if(res != VK_SUCCESS)
15883  {
15884  return res;
15885  }
15886  m_pRecorder->WriteConfiguration(
15887  m_PhysicalDeviceProperties,
15888  m_MemProps,
15889  m_VulkanApiVersion,
15890  m_UseKhrDedicatedAllocation,
15891  m_UseKhrBindMemory2,
15892  m_UseExtMemoryBudget,
15893  m_UseAmdDeviceCoherentMemory);
15894  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15895 #else
15896  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15897  return VK_ERROR_FEATURE_NOT_PRESENT;
15898 #endif
15899  }
15900 
15901 #if VMA_MEMORY_BUDGET
15902  if(m_UseExtMemoryBudget)
15903  {
15904  UpdateVulkanBudget();
15905  }
15906 #endif // #if VMA_MEMORY_BUDGET
15907 
15908  return res;
15909 }
15910 
15911 VmaAllocator_T::~VmaAllocator_T()
15912 {
15913 #if VMA_RECORDING_ENABLED
15914  if(m_pRecorder != VMA_NULL)
15915  {
15916  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15917  vma_delete(this, m_pRecorder);
15918  }
15919 #endif
15920 
15921  VMA_ASSERT(m_Pools.empty());
15922 
15923  for(size_t i = GetMemoryTypeCount(); i--; )
15924  {
15925  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15926  {
15927  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15928  }
15929 
15930  vma_delete(this, m_pDedicatedAllocations[i]);
15931  vma_delete(this, m_pBlockVectors[i]);
15932  }
15933 }
15934 
15935 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15936 {
15937 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15938  ImportVulkanFunctions_Static();
15939 #endif
15940 
15941  if(pVulkanFunctions != VMA_NULL)
15942  {
15943  ImportVulkanFunctions_Custom(pVulkanFunctions);
15944  }
15945 
15946 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15947  ImportVulkanFunctions_Dynamic();
15948 #endif
15949 
15950  ValidateVulkanFunctions();
15951 }
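/*
Sketch of the "custom" import path handled above: an application that loads
Vulkan entry points itself (e.g. through its own loader) can hand them to VMA
explicitly. Only two members are shown; the names on the right are hypothetical
pointers the application has already loaded. Any member left null falls back to
the static or dynamic import paths.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkAllocateMemory = myLoadedVkAllocateMemory;
    vulkanFunctions.vkFreeMemory     = myLoadedVkFreeMemory;
    // ... remaining members ...

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.pVulkanFunctions = &vulkanFunctions;
*/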
15952 
15953 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15954 
15955 void VmaAllocator_T::ImportVulkanFunctions_Static()
15956 {
15957  // Vulkan 1.0
15958  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
15959  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
15960  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
15961  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
15962  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
15963  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
15964  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
15965  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
15966  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
15967  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
15968  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
15969  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
15970  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
15971  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
15972  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
15973  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
15974  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
15975 
15976  // Vulkan 1.1
15977 #if VMA_VULKAN_VERSION >= 1001000
15978  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15979  {
15980  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
15981  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
15982  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
15983  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
15984  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
15985  }
15986 #endif
15987 }
15988 
15989 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15990 
15991 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
15992 {
15993  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
15994 
15995 #define VMA_COPY_IF_NOT_NULL(funcName) \
15996  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15997 
15998  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15999  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
16000  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
16001  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
16002  VMA_COPY_IF_NOT_NULL(vkMapMemory);
16003  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
16004  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
16005  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
16006  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
16007  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
16008  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
16009  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
16010  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
16011  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
16012  VMA_COPY_IF_NOT_NULL(vkCreateImage);
16013  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
16014  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
16015 
16016 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16017  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
16018  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
16019 #endif
16020 
16021 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16022  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
16023  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
16024 #endif
16025 
16026 #if VMA_MEMORY_BUDGET
16027  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
16028 #endif
16029 
16030 #undef VMA_COPY_IF_NOT_NULL
16031 }
16032 
16033 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16034 
16035 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
16036 {
16037 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
16038  if(m_VulkanFunctions.memberName == VMA_NULL) \
16039  m_VulkanFunctions.memberName = \
16040  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
16041 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
16042  if(m_VulkanFunctions.memberName == VMA_NULL) \
16043  m_VulkanFunctions.memberName = \
16044  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
16045 
16046  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
16047  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
16048  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
16049  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
16050  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
16051  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
16052  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
16053  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
16054  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
16055  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
16056  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
16057  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
16058  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
16059  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
16060  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
16061  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
16062  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
16063 
16064 #if VMA_VULKAN_VERSION >= 1001000
16065  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16066  {
16067  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
16068  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
16069  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
16070  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
16071  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
16072  }
16073 #endif
16074 
16075 #if VMA_DEDICATED_ALLOCATION
16076  if(m_UseKhrDedicatedAllocation)
16077  {
16078  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
16079  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
16080  }
16081 #endif
16082 
16083 #if VMA_BIND_MEMORY2
16084  if(m_UseKhrBindMemory2)
16085  {
16086  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
16087  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
16088  }
16089 #endif // #if VMA_BIND_MEMORY2
16090 
16091 #if VMA_MEMORY_BUDGET
16092  if(m_UseExtMemoryBudget)
16093  {
16094  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
16095  }
16096 #endif // #if VMA_MEMORY_BUDGET
16097 
16098 #undef VMA_FETCH_DEVICE_FUNC
16099 #undef VMA_FETCH_INSTANCE_FUNC
16100 }
16101 
16102 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16103 
16104 void VmaAllocator_T::ValidateVulkanFunctions()
16105 {
16106  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
16107  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
16108  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
16109  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
16110  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
16111  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
16112  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
16113  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
16114  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
16115  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
16116  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
16117  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
16118  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
16119  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
16120  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
16121  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
16122  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
16123 
16124 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16125  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
16126  {
16127  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
16128  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
16129  }
16130 #endif
16131 
16132 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16133  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
16134  {
16135  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
16136  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
16137  }
16138 #endif
16139 
16140 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
16141  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16142  {
16143  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
16144  }
16145 #endif
16146 }
16147 
16148 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
16149 {
16150  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16151  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16152  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
16153  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
16154 }
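/*
Worked example of the formula above, assuming the default values of
VMA_SMALL_HEAP_MAX_SIZE (1 GiB) and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB):

  - 256 MiB heap (small, <= 1 GiB): preferred block size = 256 MiB / 8 = 32 MiB.
  - 8 GiB heap (large): preferred block size = 256 MiB.

Both values are already multiples of 32, so VmaAlignUp leaves them unchanged.
*/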
16155 
16156 VkResult VmaAllocator_T::AllocateMemoryOfType(
16157  VkDeviceSize size,
16158  VkDeviceSize alignment,
16159  bool dedicatedAllocation,
16160  VkBuffer dedicatedBuffer,
16161  VkBufferUsageFlags dedicatedBufferUsage,
16162  VkImage dedicatedImage,
16163  const VmaAllocationCreateInfo& createInfo,
16164  uint32_t memTypeIndex,
16165  VmaSuballocationType suballocType,
16166  size_t allocationCount,
16167  VmaAllocation* pAllocations)
16168 {
16169  VMA_ASSERT(pAllocations != VMA_NULL);
16170  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
16171 
16172  VmaAllocationCreateInfo finalCreateInfo = createInfo;
16173 
16174  // If memory type is not HOST_VISIBLE, disable MAPPED.
16175  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16176  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16177  {
16178  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16179  }
16180  // If memory is lazily allocated, it should always be dedicated.
16181  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
16182  {
16183  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16184  }
16185 
16186  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
16187  VMA_ASSERT(blockVector);
16188 
16189  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
16190  bool preferDedicatedMemory =
16191  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
16192  dedicatedAllocation ||
16193  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
16194  size > preferredBlockSize / 2;
16195 
16196  if(preferDedicatedMemory &&
16197  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
16198  finalCreateInfo.pool == VK_NULL_HANDLE)
16199  {
16200  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16201  }
16202 
16203  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
16204  {
16205  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16206  {
16207  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16208  }
16209  else
16210  {
16211  return AllocateDedicatedMemory(
16212  size,
16213  suballocType,
16214  memTypeIndex,
16215  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16216  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16217  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16218  finalCreateInfo.pUserData,
16219  finalCreateInfo.priority,
16220  dedicatedBuffer,
16221  dedicatedBufferUsage,
16222  dedicatedImage,
16223  allocationCount,
16224  pAllocations);
16225  }
16226  }
16227  else
16228  {
16229  VkResult res = blockVector->Allocate(
16230  m_CurrentFrameIndex.load(),
16231  size,
16232  alignment,
16233  finalCreateInfo,
16234  suballocType,
16235  allocationCount,
16236  pAllocations);
16237  if(res == VK_SUCCESS)
16238  {
16239  return res;
16240  }
16241 
16242  // Block allocation failed: try dedicated memory.
16243  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16244  {
16245  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16246  }
16247  else
16248  {
16249  res = AllocateDedicatedMemory(
16250  size,
16251  suballocType,
16252  memTypeIndex,
16253  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16254  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16255  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16256  finalCreateInfo.pUserData,
16257  finalCreateInfo.priority,
16258  dedicatedBuffer,
16259  dedicatedBufferUsage,
16260  dedicatedImage,
16261  allocationCount,
16262  pAllocations);
16263  if(res == VK_SUCCESS)
16264  {
16265  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
16266  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
16267  return VK_SUCCESS;
16268  }
16269  else
16270  {
16271  // Everything failed: Return error code.
16272  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16273  return res;
16274  }
16275  }
16276  }
16277 }
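/*
The dedicated-vs-block decision above can also be forced from the API side.
Application-side sketch (assumes a filled VkBufferCreateInfo bufCreateInfo and
a valid VmaAllocator allocator):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // bypass block vectors

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, nullptr);

Conversely, VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT forbids new vkAllocateMemory
calls, which is why combining it with the dedicated bit fails above.
*/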
16278 
16279 VkResult VmaAllocator_T::AllocateDedicatedMemory(
16280  VkDeviceSize size,
16281  VmaSuballocationType suballocType,
16282  uint32_t memTypeIndex,
16283  bool withinBudget,
16284  bool map,
16285  bool isUserDataString,
16286  void* pUserData,
16287  float priority,
16288  VkBuffer dedicatedBuffer,
16289  VkBufferUsageFlags dedicatedBufferUsage,
16290  VkImage dedicatedImage,
16291  size_t allocationCount,
16292  VmaAllocation* pAllocations)
16293 {
16294  VMA_ASSERT(allocationCount > 0 && pAllocations);
16295 
16296  if(withinBudget)
16297  {
16298  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16299  VmaBudget heapBudget = {};
16300  GetBudget(&heapBudget, heapIndex, 1);
16301  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16302  {
16303  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16304  }
16305  }
16306 
16307  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16308  allocInfo.memoryTypeIndex = memTypeIndex;
16309  allocInfo.allocationSize = size;
16310 
16311 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16312  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16313  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16314  {
16315  if(dedicatedBuffer != VK_NULL_HANDLE)
16316  {
16317  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16318  dedicatedAllocInfo.buffer = dedicatedBuffer;
16319  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16320  }
16321  else if(dedicatedImage != VK_NULL_HANDLE)
16322  {
16323  dedicatedAllocInfo.image = dedicatedImage;
16324  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16325  }
16326  }
16327 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16328 
16329 #if VMA_BUFFER_DEVICE_ADDRESS
16330  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16331  if(m_UseKhrBufferDeviceAddress)
16332  {
16333  bool canContainBufferWithDeviceAddress = true;
16334  if(dedicatedBuffer != VK_NULL_HANDLE)
16335  {
16336  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16337  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16338  }
16339  else if(dedicatedImage != VK_NULL_HANDLE)
16340  {
16341  canContainBufferWithDeviceAddress = false;
16342  }
16343  if(canContainBufferWithDeviceAddress)
16344  {
16345  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16346  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16347  }
16348  }
16349 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16350 
16351 #if VMA_MEMORY_PRIORITY
16352  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
16353  if(m_UseExtMemoryPriority)
16354  {
16355  priorityInfo.priority = priority;
16356  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
16357  }
16358 #endif // #if VMA_MEMORY_PRIORITY
16359 
16360  size_t allocIndex;
16361  VkResult res = VK_SUCCESS;
16362  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16363  {
16364  res = AllocateDedicatedMemoryPage(
16365  size,
16366  suballocType,
16367  memTypeIndex,
16368  allocInfo,
16369  map,
16370  isUserDataString,
16371  pUserData,
16372  pAllocations + allocIndex);
16373  if(res != VK_SUCCESS)
16374  {
16375  break;
16376  }
16377  }
16378 
16379  if(res == VK_SUCCESS)
16380  {
16381  // Register them in m_pDedicatedAllocations.
16382  {
16383  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16384  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16385  VMA_ASSERT(pDedicatedAllocations);
16386  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16387  {
16388  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
16389  }
16390  }
16391 
16392  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16393  }
16394  else
16395  {
16396  // Free all already created allocations.
16397  while(allocIndex--)
16398  {
16399  VmaAllocation currAlloc = pAllocations[allocIndex];
16400  VkDeviceMemory hMemory = currAlloc->GetMemory();
16401 
16402  /*
16403  There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
16404  before vkFreeMemory.
16405 
16406  if(currAlloc->GetMappedData() != VMA_NULL)
16407  {
16408  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16409  }
16410  */
16411 
16412  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16413  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16414  currAlloc->SetUserData(this, VMA_NULL);
16415  m_AllocationObjectAllocator.Free(currAlloc);
16416  }
16417 
16418  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16419  }
16420 
16421  return res;
16422 }
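/*
The VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT chaining above pairs with an allocator
flag on the application side. Sketch (assumes the bufferDeviceAddress feature
was enabled when creating the VkDevice):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
    // Dedicated allocations for buffers created with
    // VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT then receive the allocate flag above.
*/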
16423 
16424 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16425  VkDeviceSize size,
16426  VmaSuballocationType suballocType,
16427  uint32_t memTypeIndex,
16428  const VkMemoryAllocateInfo& allocInfo,
16429  bool map,
16430  bool isUserDataString,
16431  void* pUserData,
16432  VmaAllocation* pAllocation)
16433 {
16434  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16435  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16436  if(res < 0)
16437  {
16438  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16439  return res;
16440  }
16441 
16442  void* pMappedData = VMA_NULL;
16443  if(map)
16444  {
16445  res = (*m_VulkanFunctions.vkMapMemory)(
16446  m_hDevice,
16447  hMemory,
16448  0,
16449  VK_WHOLE_SIZE,
16450  0,
16451  &pMappedData);
16452  if(res < 0)
16453  {
16454  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16455  FreeVulkanMemory(memTypeIndex, size, hMemory);
16456  return res;
16457  }
16458  }
16459 
16460  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16461  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16462  (*pAllocation)->SetUserData(this, pUserData);
16463  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16464  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16465  {
16466  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16467  }
16468 
16469  return VK_SUCCESS;
16470 }
16471 
16472 void VmaAllocator_T::GetBufferMemoryRequirements(
16473  VkBuffer hBuffer,
16474  VkMemoryRequirements& memReq,
16475  bool& requiresDedicatedAllocation,
16476  bool& prefersDedicatedAllocation) const
16477 {
16478 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16479  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16480  {
16481  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16482  memReqInfo.buffer = hBuffer;
16483 
16484  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16485 
16486  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16487  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16488 
16489  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16490 
16491  memReq = memReq2.memoryRequirements;
16492  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16493  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16494  }
16495  else
16496 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16497  {
16498  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16499  requiresDedicatedAllocation = false;
16500  prefersDedicatedAllocation = false;
16501  }
16502 }
16503 
16504 void VmaAllocator_T::GetImageMemoryRequirements(
16505  VkImage hImage,
16506  VkMemoryRequirements& memReq,
16507  bool& requiresDedicatedAllocation,
16508  bool& prefersDedicatedAllocation) const
16509 {
16510 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16511  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16512  {
16513  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16514  memReqInfo.image = hImage;
16515 
16516  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16517 
16518  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16519  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16520 
16521  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16522 
16523  memReq = memReq2.memoryRequirements;
16524  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16525  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16526  }
16527  else
16528 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16529  {
16530  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16531  requiresDedicatedAllocation = false;
16532  prefersDedicatedAllocation = false;
16533  }
16534 }
16535 
16536 VkResult VmaAllocator_T::AllocateMemory(
16537  const VkMemoryRequirements& vkMemReq,
16538  bool requiresDedicatedAllocation,
16539  bool prefersDedicatedAllocation,
16540  VkBuffer dedicatedBuffer,
16541  VkBufferUsageFlags dedicatedBufferUsage,
16542  VkImage dedicatedImage,
16543  const VmaAllocationCreateInfo& createInfo,
16544  VmaSuballocationType suballocType,
16545  size_t allocationCount,
16546  VmaAllocation* pAllocations)
16547 {
16548  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16549 
16550  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16551 
16552  if(vkMemReq.size == 0)
16553  {
16554  return VK_ERROR_VALIDATION_FAILED_EXT;
16555  }
16556  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16557  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16558  {
16559  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16560  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16561  }
16562  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16563  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16564  {
16565  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16566  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16567  }
16568  if(requiresDedicatedAllocation)
16569  {
16570  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16571  {
16572  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16573  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16574  }
16575  if(createInfo.pool != VK_NULL_HANDLE)
16576  {
16577  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16578  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16579  }
16580  }
16581  if((createInfo.pool != VK_NULL_HANDLE) &&
16582  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16583  {
16584  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16585  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16586  }
16587 
16588  if(createInfo.pool != VK_NULL_HANDLE)
16589  {
16590  const VkDeviceSize alignmentForPool = VMA_MAX(
16591  vkMemReq.alignment,
16592  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
16593 
16594  VmaAllocationCreateInfo createInfoForPool = createInfo;
16595  // If memory type is not HOST_VISIBLE, disable MAPPED.
16596  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16597  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16598  {
16599  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16600  }
16601 
16602  return createInfo.pool->m_BlockVector.Allocate(
16603  m_CurrentFrameIndex.load(),
16604  vkMemReq.size,
16605  alignmentForPool,
16606  createInfoForPool,
16607  suballocType,
16608  allocationCount,
16609  pAllocations);
16610  }
16611  else
16612  {
16613  // Bit mask of Vulkan memory types acceptable for this allocation.
16614  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16615  uint32_t memTypeIndex = UINT32_MAX;
16616  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16617  if(res == VK_SUCCESS)
16618  {
16619  VkDeviceSize alignmentForMemType = VMA_MAX(
16620  vkMemReq.alignment,
16621  GetMemoryTypeMinAlignment(memTypeIndex));
16622 
16623  res = AllocateMemoryOfType(
16624  vkMemReq.size,
16625  alignmentForMemType,
16626  requiresDedicatedAllocation || prefersDedicatedAllocation,
16627  dedicatedBuffer,
16628  dedicatedBufferUsage,
16629  dedicatedImage,
16630  createInfo,
16631  memTypeIndex,
16632  suballocType,
16633  allocationCount,
16634  pAllocations);
16635  // Succeeded on first try.
16636  if(res == VK_SUCCESS)
16637  {
16638  return res;
16639  }
16640  // Allocation from this memory type failed. Try other compatible memory types.
16641  else
16642  {
16643  for(;;)
16644  {
16645  // Remove old memTypeIndex from list of possibilities.
16646  memoryTypeBits &= ~(1u << memTypeIndex);
16647  // Find alternative memTypeIndex.
16648  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16649  if(res == VK_SUCCESS)
16650  {
16651  alignmentForMemType = VMA_MAX(
16652  vkMemReq.alignment,
16653  GetMemoryTypeMinAlignment(memTypeIndex));
16654 
16655  res = AllocateMemoryOfType(
16656  vkMemReq.size,
16657  alignmentForMemType,
16658  requiresDedicatedAllocation || prefersDedicatedAllocation,
16659  dedicatedBuffer,
16660  dedicatedBufferUsage,
16661  dedicatedImage,
16662  createInfo,
16663  memTypeIndex,
16664  suballocType,
16665  allocationCount,
16666  pAllocations);
16667  // Allocation from this alternative memory type succeeded.
16668  if(res == VK_SUCCESS)
16669  {
16670  return res;
16671  }
16672  // else: Allocation from this memory type failed. Try next one - next loop iteration.
16673  }
16674  // No other matching memory type index could be found.
16675  else
16676  {
16677  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
16678  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16679  }
16680  }
16681  }
16682  }
16683  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
16684  else
16685  return res;
16686  }
16687 }
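/*
The createInfo.pool branch above corresponds to allocating from a custom pool.
Application-side sketch (the memoryTypeIndex would normally come from
vmaFindMemoryTypeIndexForBufferInfo; the literal 0 is a placeholder assumption):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = 0;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block
    poolCreateInfo.maxBlockCount = 4;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool; // routes allocations through the pool branch above
*/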
16688 
16689 void VmaAllocator_T::FreeMemory(
16690  size_t allocationCount,
16691  const VmaAllocation* pAllocations)
16692 {
16693  VMA_ASSERT(pAllocations);
16694 
16695  for(size_t allocIndex = allocationCount; allocIndex--; )
16696  {
16697  VmaAllocation allocation = pAllocations[allocIndex];
16698 
16699  if(allocation != VK_NULL_HANDLE)
16700  {
16701  if(TouchAllocation(allocation))
16702  {
16703  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16704  {
16705  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
16706  }
16707 
16708  switch(allocation->GetType())
16709  {
16710  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16711  {
16712  VmaBlockVector* pBlockVector = VMA_NULL;
16713  VmaPool hPool = allocation->GetBlock()->GetParentPool();
16714  if(hPool != VK_NULL_HANDLE)
16715  {
16716  pBlockVector = &hPool->m_BlockVector;
16717  }
16718  else
16719  {
16720  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16721  pBlockVector = m_pBlockVectors[memTypeIndex];
16722  }
16723  pBlockVector->Free(allocation);
16724  }
16725  break;
16726  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16727  FreeDedicatedMemory(allocation);
16728  break;
16729  default:
16730  VMA_ASSERT(0);
16731  }
16732  }
16733 
16734  // Do this regardless of whether the allocation is lost. Lost allocations still count toward m_Budget.m_AllocationBytes.
16735  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16736  allocation->SetUserData(this, VMA_NULL);
16737  m_AllocationObjectAllocator.Free(allocation);
16738  }
16739  }
16740 }
16741 
16742 VkResult VmaAllocator_T::ResizeAllocation(
16743  const VmaAllocation alloc,
16744  VkDeviceSize newSize)
16745 {
16746  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16747  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16748  {
16749  return VK_ERROR_VALIDATION_FAILED_EXT;
16750  }
16751  if(newSize == alloc->GetSize())
16752  {
16753  return VK_SUCCESS;
16754  }
16755  return VK_ERROR_OUT_OF_POOL_MEMORY;
16756 }
16757 
16758 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16759 {
16760  // Initialize.
16761  InitStatInfo(pStats->total);
16762  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16763  InitStatInfo(pStats->memoryType[i]);
16764  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16765  InitStatInfo(pStats->memoryHeap[i]);
16766 
16767  // Process default pools.
16768  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16769  {
16770  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16771  VMA_ASSERT(pBlockVector);
16772  pBlockVector->AddStats(pStats);
16773  }
16774 
16775  // Process custom pools.
16776  {
16777  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16778  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16779  {
16780  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16781  }
16782  }
16783 
16784  // Process dedicated allocations.
16785  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16786  {
16787  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16788  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16789  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16790  VMA_ASSERT(pDedicatedAllocVector);
16791  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16792  {
16793  VmaStatInfo allocationStatInfo;
16794  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16795  VmaAddStatInfo(pStats->total, allocationStatInfo);
16796  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16797  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16798  }
16799  }
16800 
16801  // Postprocess.
16802  VmaPostprocessCalcStatInfo(pStats->total);
16803  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16804  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16805  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16806  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16807 }
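/*
Sketch of consuming the statistics gathered above (application-side):

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu B, unused: %llu B\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes);
*/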
16808 
16809 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
16810 {
16811 #if VMA_MEMORY_BUDGET
16812  if(m_UseExtMemoryBudget)
16813  {
16814  if(m_Budget.m_OperationsSinceBudgetFetch < 30) // Cached budget is considered fresh for up to 30 budget-affecting operations.
16815  {
16816  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
16817  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16818  {
16819  const uint32_t heapIndex = firstHeap + i;
16820 
16821  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16822  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16823 
16824  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
16825  {
16826  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
16827  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16828  }
16829  else
16830  {
16831  outBudget->usage = 0;
16832  }
16833 
16834  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
16835  outBudget->budget = VMA_MIN(
16836  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
16837  }
16838  }
16839  else
16840  {
16841  UpdateVulkanBudget(); // Outside of mutex lock
16842  GetBudget(outBudget, firstHeap, heapCount); // Recursion
16843  }
16844  }
16845  else
16846 #endif
16847  {
16848  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16849  {
16850  const uint32_t heapIndex = firstHeap + i;
16851 
16852  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16853  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16854 
16855  outBudget->usage = outBudget->blockBytes;
16856  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
16857  }
16858  }
16859 }
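
// Usage sketch (illustrative only, not part of the library): checking headroom
// on a heap via the public vmaGetBudget() before making a large allocation.
// Assumes a valid `allocator`; `heapIndex` must be below
// VkPhysicalDeviceMemoryProperties::memoryHeapCount.
static bool Example_HasBudgetFor(VmaAllocator allocator, uint32_t heapIndex, VkDeviceSize extraBytes)
{
    VmaBudget budget[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budget); // fills one VmaBudget per heap
    // With VK_EXT_memory_budget enabled this reflects the driver's estimate;
    // otherwise it falls back to the 80%-of-heap-size heuristic above.
    return budget[heapIndex].usage + extraBytes <= budget[heapIndex].budget;
}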
16860 
16861 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
16862 
16863 VkResult VmaAllocator_T::DefragmentationBegin(
16864  const VmaDefragmentationInfo2& info,
16865  VmaDefragmentationStats* pStats,
16866  VmaDefragmentationContext* pContext)
16867 {
16868  if(info.pAllocationsChanged != VMA_NULL)
16869  {
16870  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
16871  }
16872 
16873  *pContext = vma_new(this, VmaDefragmentationContext_T)(
16874  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16875 
16876  (*pContext)->AddPools(info.poolCount, info.pPools);
16877  (*pContext)->AddAllocations(
16878  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16879 
16880  VkResult res = (*pContext)->Defragment(
16881  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16882  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16883  info.commandBuffer, pStats, info.flags);
16884 
16885  if(res != VK_NOT_READY)
16886  {
16887  vma_delete(this, *pContext);
16888  *pContext = VMA_NULL;
16889  }
16890 
16891  return res;
16892 }
16893 
16894 VkResult VmaAllocator_T::DefragmentationEnd(
16895  VmaDefragmentationContext context)
16896 {
16897  vma_delete(this, context);
16898  return VK_SUCCESS;
16899 }
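
// Usage sketch (illustrative only, not part of the library): the CPU-only
// defragmentation flow that drives DefragmentationBegin/DefragmentationEnd
// above via the public API. `allocs` are assumed to be allocations in
// host-visible memory owned by the caller; `allocsChanged` receives one
// VkBool32 per allocation.
static VkResult Example_DefragmentCpuOnly(
    VmaAllocator allocator,
    VmaAllocation* allocs, uint32_t allocCount,
    VkBool32* allocsChanged)
{
    VmaDefragmentationInfo2 info = {};
    info.allocationCount = allocCount;
    info.pAllocations = allocs;
    info.pAllocationsChanged = allocsChanged;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;
    // No command buffer is provided, so GPU-side moves stay disabled.

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, nullptr, &ctx);
    if(res == VK_SUCCESS || res == VK_NOT_READY)
    {
        // Safe even if Begin already finished: End on a null context is a no-op.
        res = vmaDefragmentationEnd(allocator, ctx);
    }
    return res;
}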
16900 
16901 VkResult VmaAllocator_T::DefragmentationPassBegin(
16902  VmaDefragmentationPassInfo* pInfo,
16903  VmaDefragmentationContext context)
16904 {
16905  return context->DefragmentPassBegin(pInfo);
16906 }
16907 VkResult VmaAllocator_T::DefragmentationPassEnd(
16908  VmaDefragmentationContext context)
16909 {
16910  return context->DefragmentPassEnd();
16911 
16912 }
16913 
16914 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16915 {
16916  if(hAllocation->CanBecomeLost())
16917  {
16918  /*
16919  Warning: This is a carefully designed algorithm.
16920  Do not modify unless you really know what you're doing :)
16921  */
16922  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16923  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16924  for(;;)
16925  {
16926  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16927  {
16928  pAllocationInfo->memoryType = UINT32_MAX;
16929  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16930  pAllocationInfo->offset = 0;
16931  pAllocationInfo->size = hAllocation->GetSize();
16932  pAllocationInfo->pMappedData = VMA_NULL;
16933  pAllocationInfo->pUserData = hAllocation->GetUserData();
16934  return;
16935  }
16936  else if(localLastUseFrameIndex == localCurrFrameIndex)
16937  {
16938  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16939  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16940  pAllocationInfo->offset = hAllocation->GetOffset();
16941  pAllocationInfo->size = hAllocation->GetSize();
16942  pAllocationInfo->pMappedData = VMA_NULL;
16943  pAllocationInfo->pUserData = hAllocation->GetUserData();
16944  return;
16945  }
16946  else // Last use time earlier than current time.
16947  {
16948  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16949  {
16950  localLastUseFrameIndex = localCurrFrameIndex;
16951  }
16952  }
16953  }
16954  }
16955  else
16956  {
16957 #if VMA_STATS_STRING_ENABLED
16958  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16959  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16960  for(;;)
16961  {
16962  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16963  if(localLastUseFrameIndex == localCurrFrameIndex)
16964  {
16965  break;
16966  }
16967  else // Last use time earlier than current time.
16968  {
16969  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16970  {
16971  localLastUseFrameIndex = localCurrFrameIndex;
16972  }
16973  }
16974  }
16975 #endif
16976 
16977  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16978  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16979  pAllocationInfo->offset = hAllocation->GetOffset();
16980  pAllocationInfo->size = hAllocation->GetSize();
16981  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16982  pAllocationInfo->pUserData = hAllocation->GetUserData();
16983  }
16984 }
16985 
16986 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16987 {
16988  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16989  if(hAllocation->CanBecomeLost())
16990  {
16991  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16992  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16993  for(;;)
16994  {
16995  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16996  {
16997  return false;
16998  }
16999  else if(localLastUseFrameIndex == localCurrFrameIndex)
17000  {
17001  return true;
17002  }
17003  else // Last use time earlier than current time.
17004  {
17005  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17006  {
17007  localLastUseFrameIndex = localCurrFrameIndex;
17008  }
17009  }
17010  }
17011  }
17012  else
17013  {
17014 #if VMA_STATS_STRING_ENABLED
17015  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17016  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17017  for(;;)
17018  {
17019  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17020  if(localLastUseFrameIndex == localCurrFrameIndex)
17021  {
17022  break;
17023  }
17024  else // Last use time earlier than current time.
17025  {
17026  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17027  {
17028  localLastUseFrameIndex = localCurrFrameIndex;
17029  }
17030  }
17031  }
17032 #endif
17033 
17034  return true;
17035  }
17036 }
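
// Usage sketch (illustrative only, not part of the library): the per-frame
// pattern that TouchAllocation above supports for allocations created with
// VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT. `RecreateResource` is a
// hypothetical application callback.
static void Example_BeginFrame(
    VmaAllocator allocator, uint32_t frameIndex, VmaAllocation alloc,
    void (*RecreateResource)(void* userData), void* userData)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex);
    // Marks the allocation as used in this frame, or reports it lost.
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        RecreateResource(userData); // the allocation was lost; recreate it
    }
}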
17037 
17038 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
17039 {
17040  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
17041 
17042  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
17043 
17044  if(newCreateInfo.maxBlockCount == 0)
17045  {
17046  newCreateInfo.maxBlockCount = SIZE_MAX;
17047  }
17048  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
17049  {
17050  return VK_ERROR_INITIALIZATION_FAILED;
17051  }
17052  // Memory type index out of range or forbidden.
17053  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
17054  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
17055  {
17056  return VK_ERROR_FEATURE_NOT_PRESENT;
17057  }
17058 
17059  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
17060 
17061  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
17062 
17063  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
17064  if(res != VK_SUCCESS)
17065  {
17066  vma_delete(this, *pPool);
17067  *pPool = VMA_NULL;
17068  return res;
17069  }
17070 
17071  // Add to m_Pools.
17072  {
17073  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17074  (*pPool)->SetId(m_NextPoolId++);
17075  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
17076  }
17077 
17078  return VK_SUCCESS;
17079 }
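
// Usage sketch (illustrative only, not part of the library): creating a custom
// pool as CreatePool above expects. The buffer parameters used to pick
// `memTypeIndex`, and the block size and count limits, are example values.
static VkResult Example_CreateUniformBufferPool(VmaAllocator allocator, VmaPool* outPool)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 1024;
    bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex = 0;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufInfo, &allocCreateInfo, &memTypeIndex);
    if(res != VK_SUCCESS)
    {
        return res;
    }

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.blockSize = 16ull * 1024 * 1024; // fixed 16 MiB blocks
    poolInfo.maxBlockCount = 8;               // caps the pool at 128 MiB
    return vmaCreatePool(allocator, &poolInfo, outPool);
}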
17080 
17081 void VmaAllocator_T::DestroyPool(VmaPool pool)
17082 {
17083  // Remove from m_Pools.
17084  {
17085  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17086  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
17087  VMA_ASSERT(success && "Pool not found in Allocator.");
17088  }
17089 
17090  vma_delete(this, pool);
17091 }
17092 
17093 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
17094 {
17095  pool->m_BlockVector.GetPoolStats(pPoolStats);
17096 }
17097 
17098 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
17099 {
17100  m_CurrentFrameIndex.store(frameIndex);
17101 
17102 #if VMA_MEMORY_BUDGET
17103  if(m_UseExtMemoryBudget)
17104  {
17105  UpdateVulkanBudget();
17106  }
17107 #endif // #if VMA_MEMORY_BUDGET
17108 }
17109 
17110 void VmaAllocator_T::MakePoolAllocationsLost(
17111  VmaPool hPool,
17112  size_t* pLostAllocationCount)
17113 {
17114  hPool->m_BlockVector.MakePoolAllocationsLost(
17115  m_CurrentFrameIndex.load(),
17116  pLostAllocationCount);
17117 }
17118 
17119 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
17120 {
17121  return hPool->m_BlockVector.CheckCorruption();
17122 }
17123 
17124 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
17125 {
17126  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
17127 
17128  // Process default pools.
17129  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17130  {
17131  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
17132  {
17133  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17134  VMA_ASSERT(pBlockVector);
17135  VkResult localRes = pBlockVector->CheckCorruption();
17136  switch(localRes)
17137  {
17138  case VK_ERROR_FEATURE_NOT_PRESENT:
17139  break;
17140  case VK_SUCCESS:
17141  finalRes = VK_SUCCESS;
17142  break;
17143  default:
17144  return localRes;
17145  }
17146  }
17147  }
17148 
17149  // Process custom pools.
17150  {
17151  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17152  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
17153  {
17154  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
17155  {
17156  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
17157  switch(localRes)
17158  {
17159  case VK_ERROR_FEATURE_NOT_PRESENT:
17160  break;
17161  case VK_SUCCESS:
17162  finalRes = VK_SUCCESS;
17163  break;
17164  default:
17165  return localRes;
17166  }
17167  }
17168  }
17169  }
17170 
17171  return finalRes;
17172 }
17173 
17174 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
17175 {
17176  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
17177  (*pAllocation)->InitLost();
17178 }
17179 
17180 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
17181 {
17182  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
17183 
17184  // HeapSizeLimit is in effect for this heap.
17185  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
17186  {
17187  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
17188  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
17189  for(;;)
17190  {
17191  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
17192  if(blockBytesAfterAllocation > heapSize)
17193  {
17194  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17195  }
17196  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
17197  {
17198  break;
17199  }
17200  }
17201  }
17202  else
17203  {
17204  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
17205  }
17206 
17207  // VULKAN CALL vkAllocateMemory.
17208  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
17209 
17210  if(res == VK_SUCCESS)
17211  {
17212 #if VMA_MEMORY_BUDGET
17213  ++m_Budget.m_OperationsSinceBudgetFetch;
17214 #endif
17215 
17216  // Informative callback.
17217  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
17218  {
17219  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
17220  }
17221  }
17222  else
17223  {
17224  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
17225  }
17226 
17227  return res;
17228 }
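
// Illustrative sketch (not part of the library) of the lock-free reservation
// pattern AllocateVulkanMemory uses above: a compare-exchange loop reserves
// the bytes *before* calling vkAllocateMemory, so concurrent threads can never
// jointly exceed the limit. The std::atomic counter (already available in this
// implementation) and `limit` are stand-ins for m_Budget.m_BlockBytes[heapIndex]
// and the heap size.
static bool Example_TryReserveBytes(
    std::atomic<VkDeviceSize>& usedBytes, VkDeviceSize limit, VkDeviceSize bytes)
{
    VkDeviceSize cur = usedBytes.load();
    for(;;)
    {
        const VkDeviceSize after = cur + bytes;
        if(after > limit)
        {
            return false; // would exceed the limit; nothing was reserved
        }
        // On failure compare_exchange_strong reloads `cur`, and the loop retries.
        if(usedBytes.compare_exchange_strong(cur, after))
        {
            return true; // reserved; the caller must roll back if the allocation fails
        }
    }
}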
17229 
17230 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
17231 {
17232  // Informative callback.
17233  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
17234  {
17235  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
17236  }
17237 
17238  // VULKAN CALL vkFreeMemory.
17239  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
17240 
17241  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
17242 }
17243 
17244 VkResult VmaAllocator_T::BindVulkanBuffer(
17245  VkDeviceMemory memory,
17246  VkDeviceSize memoryOffset,
17247  VkBuffer buffer,
17248  const void* pNext)
17249 {
17250  if(pNext != VMA_NULL)
17251  {
17252 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17253  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17254  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
17255  {
17256  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
17257  bindBufferMemoryInfo.pNext = pNext;
17258  bindBufferMemoryInfo.buffer = buffer;
17259  bindBufferMemoryInfo.memory = memory;
17260  bindBufferMemoryInfo.memoryOffset = memoryOffset;
17261  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
17262  }
17263  else
17264 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17265  {
17266  return VK_ERROR_EXTENSION_NOT_PRESENT;
17267  }
17268  }
17269  else
17270  {
17271  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
17272  }
17273 }
17274 
17275 VkResult VmaAllocator_T::BindVulkanImage(
17276  VkDeviceMemory memory,
17277  VkDeviceSize memoryOffset,
17278  VkImage image,
17279  const void* pNext)
17280 {
17281  if(pNext != VMA_NULL)
17282  {
17283 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17284  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17285  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
17286  {
17287  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
17288  bindImageMemoryInfo.pNext = pNext;
17289  bindImageMemoryInfo.image = image;
17290  bindImageMemoryInfo.memory = memory;
17291  bindImageMemoryInfo.memoryOffset = memoryOffset;
17292  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
17293  }
17294  else
17295 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17296  {
17297  return VK_ERROR_EXTENSION_NOT_PRESENT;
17298  }
17299  }
17300  else
17301  {
17302  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17303  }
17304 }
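
// Usage sketch (illustrative only, not part of the library): reaching
// BindVulkanBuffer above with a non-null pNext through the public
// vmaBindBufferMemory2(), e.g. to bind on a specific device of a device group.
// Requires Vulkan 1.1 or VK_KHR_bind_memory2 and a device created as a device
// group; `deviceIndex` is an example value.
static VkResult Example_BindBufferOnDeviceGroup(
    VmaAllocator allocator, VmaAllocation alloc, VkBuffer buf)
{
    const uint32_t deviceIndex = 0;
    VkBindBufferMemoryDeviceGroupInfo groupInfo =
        { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO };
    groupInfo.deviceIndexCount = 1;
    groupInfo.pDeviceIndices = &deviceIndex;
    // The pNext chain is forwarded into VkBindBufferMemoryInfoKHR above.
    return vmaBindBufferMemory2(allocator, alloc, 0, buf, &groupInfo);
}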
17305 
17306 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17307 {
17308  if(hAllocation->CanBecomeLost())
17309  {
17310  return VK_ERROR_MEMORY_MAP_FAILED;
17311  }
17312 
17313  switch(hAllocation->GetType())
17314  {
17315  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17316  {
17317  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17318  char *pBytes = VMA_NULL;
17319  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17320  if(res == VK_SUCCESS)
17321  {
17322  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17323  hAllocation->BlockAllocMap();
17324  }
17325  return res;
17326  }
17327  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17328  return hAllocation->DedicatedAllocMap(this, ppData);
17329  default:
17330  VMA_ASSERT(0);
17331  return VK_ERROR_MEMORY_MAP_FAILED;
17332  }
17333 }
17334 
17335 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17336 {
17337  switch(hAllocation->GetType())
17338  {
17339  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17340  {
17341  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17342  hAllocation->BlockAllocUnmap();
17343  pBlock->Unmap(this, 1);
17344  }
17345  break;
17346  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17347  hAllocation->DedicatedAllocUnmap(this);
17348  break;
17349  default:
17350  VMA_ASSERT(0);
17351  }
17352 }
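
// Usage sketch (illustrative only, not part of the library): the Map/Unmap
// pair above as driven by the public API, including the flush required for
// memory types without HOST_COHERENT. `srcData`/`srcSize` are caller-provided
// and must fit inside the allocation.
static VkResult Example_UploadToAllocation(
    VmaAllocator allocator, VmaAllocation alloc,
    const void* srcData, size_t srcSize)
{
    void* mapped = nullptr;
    VkResult res = vmaMapMemory(allocator, alloc, &mapped);
    if(res != VK_SUCCESS)
    {
        return res;
    }
    memcpy(mapped, srcData, srcSize);
    // A no-op on HOST_COHERENT memory; otherwise flushes the written bytes.
    res = vmaFlushAllocation(allocator, alloc, 0, srcSize);
    vmaUnmapMemory(allocator, alloc);
    return res;
}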
17353 
17354 VkResult VmaAllocator_T::BindBufferMemory(
17355  VmaAllocation hAllocation,
17356  VkDeviceSize allocationLocalOffset,
17357  VkBuffer hBuffer,
17358  const void* pNext)
17359 {
17360  VkResult res = VK_SUCCESS;
17361  switch(hAllocation->GetType())
17362  {
17363  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17364  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17365  break;
17366  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17367  {
17368  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17369  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17370  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17371  break;
17372  }
17373  default:
17374  VMA_ASSERT(0);
17375  }
17376  return res;
17377 }
17378 
17379 VkResult VmaAllocator_T::BindImageMemory(
17380  VmaAllocation hAllocation,
17381  VkDeviceSize allocationLocalOffset,
17382  VkImage hImage,
17383  const void* pNext)
17384 {
17385  VkResult res = VK_SUCCESS;
17386  switch(hAllocation->GetType())
17387  {
17388  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17389  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17390  break;
17391  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17392  {
17393  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17394  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17395  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17396  break;
17397  }
17398  default:
17399  VMA_ASSERT(0);
17400  }
17401  return res;
17402 }
17403 
17404 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17405  VmaAllocation hAllocation,
17406  VkDeviceSize offset, VkDeviceSize size,
17407  VMA_CACHE_OPERATION op)
17408 {
17409  VkResult res = VK_SUCCESS;
17410 
17411  VkMappedMemoryRange memRange = {};
17412  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17413  {
17414  switch(op)
17415  {
17416  case VMA_CACHE_FLUSH:
17417  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17418  break;
17419  case VMA_CACHE_INVALIDATE:
17420  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17421  break;
17422  default:
17423  VMA_ASSERT(0);
17424  }
17425  }
17426  // else: Just ignore this call.
17427  return res;
17428 }
17429 
17430 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17431  uint32_t allocationCount,
17432  const VmaAllocation* allocations,
17433  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17434  VMA_CACHE_OPERATION op)
17435 {
17436  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17437  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17438  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17439 
17440  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17441  {
17442  const VmaAllocation alloc = allocations[allocIndex];
17443  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17444  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17445  VkMappedMemoryRange newRange;
17446  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17447  {
17448  ranges.push_back(newRange);
17449  }
17450  }
17451 
17452  VkResult res = VK_SUCCESS;
17453  if(!ranges.empty())
17454  {
17455  switch(op)
17456  {
17457  case VMA_CACHE_FLUSH:
17458  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17459  break;
17460  case VMA_CACHE_INVALIDATE:
17461  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17462  break;
17463  default:
17464  VMA_ASSERT(0);
17465  }
17466  }
17467  // else: Just ignore this call.
17468  return res;
17469 }
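
// Usage sketch (illustrative only, not part of the library): batching many
// flushes into a single vkFlushMappedMemoryRanges call through the public
// vmaFlushAllocations(), which lands in FlushOrInvalidateAllocations above.
// Passing null offsets/sizes flushes each allocation in full (VK_WHOLE_SIZE).
static VkResult Example_FlushAll(
    VmaAllocator allocator, const VmaAllocation* allocs, uint32_t count)
{
    return vmaFlushAllocations(allocator, count, allocs, nullptr, nullptr);
}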
17470 
17471 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17472 {
17473  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17474 
17475  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17476  {
17477  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17478  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
17479  VMA_ASSERT(pDedicatedAllocations);
17480  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
17481  VMA_ASSERT(success);
17482  }
17483 
17484  VkDeviceMemory hMemory = allocation->GetMemory();
17485 
17486  /*
17487  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
17488  before vkFreeMemory.
17489 
17490  if(allocation->GetMappedData() != VMA_NULL)
17491  {
17492  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17493  }
17494  */
17495 
17496  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17497 
17498  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17499 }
17500 
17501 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17502 {
17503  VkBufferCreateInfo dummyBufCreateInfo;
17504  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17505 
17506  uint32_t memoryTypeBits = 0;
17507 
17508  // Create buffer.
17509  VkBuffer buf = VK_NULL_HANDLE;
17510  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17511  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17512  if(res == VK_SUCCESS)
17513  {
17514  // Query for supported memory types.
17515  VkMemoryRequirements memReq;
17516  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17517  memoryTypeBits = memReq.memoryTypeBits;
17518 
17519  // Destroy buffer.
17520  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17521  }
17522 
17523  return memoryTypeBits;
17524 }
17525 
17526 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17527 {
17528  // Make sure memory information is already fetched.
17529  VMA_ASSERT(GetMemoryTypeCount() > 0);
17530 
17531  uint32_t memoryTypeBits = UINT32_MAX;
17532 
17533  if(!m_UseAmdDeviceCoherentMemory)
17534  {
17535  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17536  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17537  {
17538  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17539  {
17540  memoryTypeBits &= ~(1u << memTypeIndex);
17541  }
17542  }
17543  }
17544 
17545  return memoryTypeBits;
17546 }
17547 
17548 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17549  VmaAllocation allocation,
17550  VkDeviceSize offset, VkDeviceSize size,
17551  VkMappedMemoryRange& outRange) const
17552 {
17553  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17554  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17555  {
17556  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17557  const VkDeviceSize allocationSize = allocation->GetSize();
17558  VMA_ASSERT(offset <= allocationSize);
17559 
17560  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17561  outRange.pNext = VMA_NULL;
17562  outRange.memory = allocation->GetMemory();
17563 
17564  switch(allocation->GetType())
17565  {
17566  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17567  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17568  if(size == VK_WHOLE_SIZE)
17569  {
17570  outRange.size = allocationSize - outRange.offset;
17571  }
17572  else
17573  {
17574  VMA_ASSERT(offset + size <= allocationSize);
17575  outRange.size = VMA_MIN(
17576  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17577  allocationSize - outRange.offset);
17578  }
17579  break;
17580  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17581  {
17582  // 1. Still within this allocation.
17583  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17584  if(size == VK_WHOLE_SIZE)
17585  {
17586  size = allocationSize - offset;
17587  }
17588  else
17589  {
17590  VMA_ASSERT(offset + size <= allocationSize);
17591  }
17592  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17593 
17594  // 2. Adjust to whole block.
17595  const VkDeviceSize allocationOffset = allocation->GetOffset();
17596  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17597  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17598  outRange.offset += allocationOffset;
17599  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17600 
17601  break;
17602  }
17603  default:
17604  VMA_ASSERT(0);
17605  }
17606  return true;
17607  }
17608  return false;
17609 }
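
// Worked example (illustrative) of the nonCoherentAtomSize alignment above:
// with nonCoherentAtomSize = 64, a request to flush offset = 100, size = 40 in
// a dedicated allocation of 256 bytes becomes
//   outRange.offset = VmaAlignDown(100, 64)                              = 64
//   outRange.size   = VMA_MIN(VmaAlignUp(40 + (100 - 64), 64), 256 - 64) = 128
// so the flushed window [64, 192) covers the requested [100, 140) and is
// aligned as vkFlushMappedMemoryRanges requires.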
17610 
17611 #if VMA_MEMORY_BUDGET
17612 
17613 void VmaAllocator_T::UpdateVulkanBudget()
17614 {
17615  VMA_ASSERT(m_UseExtMemoryBudget);
17616 
17617  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
17618 
17619  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
17620  VmaPnextChainPushFront(&memProps, &budgetProps);
17621 
17622  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
17623 
17624  {
17625  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
17626 
17627  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
17628  {
17629  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
17630  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
17631  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
17632 
17633  // Some buggy drivers return an incorrect budget, e.g. 0 or much larger than the heap size.
17634  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
17635  {
17636  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17637  }
17638  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
17639  {
17640  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
17641  }
17642  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
17643  {
17644  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17645  }
17646  }
17647  m_Budget.m_OperationsSinceBudgetFetch = 0;
17648  }
17649 }
17650 
17651 #endif // #if VMA_MEMORY_BUDGET
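
// Usage sketch (illustrative only, not part of the library): opting in to
// VK_EXT_memory_budget so UpdateVulkanBudget above is used instead of the 80%
// heuristic. Assumes the extension was enabled on `device` and that the
// instance/physicalDevice/device handles are valid.
static VkResult Example_CreateAllocatorWithBudget(
    VkInstance instance, VkPhysicalDevice physicalDevice, VkDevice device,
    VmaAllocator* outAllocator)
{
    VmaAllocatorCreateInfo createInfo = {};
    createInfo.instance = instance;
    createInfo.physicalDevice = physicalDevice;
    createInfo.device = device;
    createInfo.vulkanApiVersion = VK_API_VERSION_1_1;
    createInfo.flags = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT;
    return vmaCreateAllocator(&createInfo, outAllocator);
}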
17652 
17653 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
17654 {
17655  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
17656  !hAllocation->CanBecomeLost() &&
17657  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17658  {
17659  void* pData = VMA_NULL;
17660  VkResult res = Map(hAllocation, &pData);
17661  if(res == VK_SUCCESS)
17662  {
17663  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
17664  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
17665  Unmap(hAllocation);
17666  }
17667  else
17668  {
17669  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
17670  }
17671  }
17672 }
17673 
17674 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
17675 {
17676  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
17677  if(memoryTypeBits == UINT32_MAX)
17678  {
17679  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
17680  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
17681  }
17682  return memoryTypeBits;
17683 }
17684 
17685 #if VMA_STATS_STRING_ENABLED
17686 
17687 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
17688 {
17689  bool dedicatedAllocationsStarted = false;
17690  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17691  {
17692  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17693  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
17694  VMA_ASSERT(pDedicatedAllocVector);
17695  if(pDedicatedAllocVector->empty() == false)
17696  {
17697  if(dedicatedAllocationsStarted == false)
17698  {
17699  dedicatedAllocationsStarted = true;
17700  json.WriteString("DedicatedAllocations");
17701  json.BeginObject();
17702  }
17703 
17704  json.BeginString("Type ");
17705  json.ContinueString(memTypeIndex);
17706  json.EndString();
17707 
17708  json.BeginArray();
17709 
17710  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
17711  {
17712  json.BeginObject(true);
17713  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
17714  hAlloc->PrintParameters(json);
17715  json.EndObject();
17716  }
17717 
17718  json.EndArray();
17719  }
17720  }
17721  if(dedicatedAllocationsStarted)
17722  {
17723  json.EndObject();
17724  }
17725 
17726  {
17727  bool allocationsStarted = false;
17728  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17729  {
17730  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
17731  {
17732  if(allocationsStarted == false)
17733  {
17734  allocationsStarted = true;
17735  json.WriteString("DefaultPools");
17736  json.BeginObject();
17737  }
17738 
17739  json.BeginString("Type ");
17740  json.ContinueString(memTypeIndex);
17741  json.EndString();
17742 
17743  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
17744  }
17745  }
17746  if(allocationsStarted)
17747  {
17748  json.EndObject();
17749  }
17750  }
17751 
17752  // Custom pools
17753  {
17754  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17755  const size_t poolCount = m_Pools.size();
17756  if(poolCount > 0)
17757  {
17758  json.WriteString("Pools");
17759  json.BeginObject();
17760  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
17761  {
17762  json.BeginString();
17763  json.ContinueString(m_Pools[poolIndex]->GetId());
17764  json.EndString();
17765 
17766  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
17767  }
17768  json.EndObject();
17769  }
17770  }
17771 }
17772 
17773 #endif // #if VMA_STATS_STRING_ENABLED
17774 
17775 //////////////////////////////////////////////////////////////////////////////////////////////////
17776 // Public interface
17777 
17778 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
17779  const VmaAllocatorCreateInfo* pCreateInfo,
17780  VmaAllocator* pAllocator)
17781 {
17782  VMA_ASSERT(pCreateInfo && pAllocator);
17783  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
17784  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
17785  VMA_DEBUG_LOG("vmaCreateAllocator");
17786  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
17787  return (*pAllocator)->Init(pCreateInfo);
17788 }
17789 
17790 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17791  VmaAllocator allocator)
17792 {
17793  if(allocator != VK_NULL_HANDLE)
17794  {
17795  VMA_DEBUG_LOG("vmaDestroyAllocator");
17796  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17797  vma_delete(&allocationCallbacks, allocator);
17798  }
17799 }
17800 
17801 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
17802 {
17803  VMA_ASSERT(allocator && pAllocatorInfo);
17804  pAllocatorInfo->instance = allocator->m_hInstance;
17805  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
17806  pAllocatorInfo->device = allocator->m_hDevice;
17807 }
17808 
17809 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
17810  VmaAllocator allocator,
17811  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
17812 {
17813  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
17814  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
17815 }
17816 
17817 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
17818  VmaAllocator allocator,
17819  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
17820 {
17821  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
17822  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
17823 }
17824 
17825 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
17826  VmaAllocator allocator,
17827  uint32_t memoryTypeIndex,
17828  VkMemoryPropertyFlags* pFlags)
17829 {
17830  VMA_ASSERT(allocator && pFlags);
17831  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
17832  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
17833 }
17834 
17835 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
17836  VmaAllocator allocator,
17837  uint32_t frameIndex)
17838 {
17839  VMA_ASSERT(allocator);
17840  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17841 
17842  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17843 
17844  allocator->SetCurrentFrameIndex(frameIndex);
17845 }
17846 
17847 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
17848  VmaAllocator allocator,
17849  VmaStats* pStats)
17850 {
17851  VMA_ASSERT(allocator && pStats);
17852  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17853  allocator->CalculateStats(pStats);
17854 }
17855 
17856 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
17857  VmaAllocator allocator,
17858  VmaBudget* pBudget)
17859 {
17860  VMA_ASSERT(allocator && pBudget);
17861  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17862  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
17863 }
17864 
17865 #if VMA_STATS_STRING_ENABLED
17866 
17867 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17868  VmaAllocator allocator,
17869  char** ppStatsString,
17870  VkBool32 detailedMap)
17871 {
17872  VMA_ASSERT(allocator && ppStatsString);
17873  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17874 
17875  VmaStringBuilder sb(allocator);
17876  {
17877  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17878  json.BeginObject();
17879 
17880  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17881  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17882 
17883  VmaStats stats;
17884  allocator->CalculateStats(&stats);
17885 
17886  json.WriteString("Total");
17887  VmaPrintStatInfo(json, stats.total);
17888 
17889  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17890  {
17891  json.BeginString("Heap ");
17892  json.ContinueString(heapIndex);
17893  json.EndString();
17894  json.BeginObject();
17895 
17896  json.WriteString("Size");
17897  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17898 
17899  json.WriteString("Flags");
17900  json.BeginArray(true);
17901  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17902  {
17903  json.WriteString("DEVICE_LOCAL");
17904  }
17905  json.EndArray();
17906 
17907  json.WriteString("Budget");
17908  json.BeginObject();
17909  {
17910  json.WriteString("BlockBytes");
17911  json.WriteNumber(budget[heapIndex].blockBytes);
17912  json.WriteString("AllocationBytes");
17913  json.WriteNumber(budget[heapIndex].allocationBytes);
17914  json.WriteString("Usage");
17915  json.WriteNumber(budget[heapIndex].usage);
17916  json.WriteString("Budget");
17917  json.WriteNumber(budget[heapIndex].budget);
17918  }
17919  json.EndObject();
17920 
17921  if(stats.memoryHeap[heapIndex].blockCount > 0)
17922  {
17923  json.WriteString("Stats");
17924  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17925  }
17926 
17927  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17928  {
17929  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17930  {
17931  json.BeginString("Type ");
17932  json.ContinueString(typeIndex);
17933  json.EndString();
17934 
17935  json.BeginObject();
17936 
17937  json.WriteString("Flags");
17938  json.BeginArray(true);
17939  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17940  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17941  {
17942  json.WriteString("DEVICE_LOCAL");
17943  }
17944  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17945  {
17946  json.WriteString("HOST_VISIBLE");
17947  }
17948  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17949  {
17950  json.WriteString("HOST_COHERENT");
17951  }
17952  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17953  {
17954  json.WriteString("HOST_CACHED");
17955  }
17956  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17957  {
17958  json.WriteString("LAZILY_ALLOCATED");
17959  }
17960  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17961  {
17962  json.WriteString("PROTECTED");
17963  }
17964  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17965  {
17966  json.WriteString("DEVICE_COHERENT");
17967  }
17968  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17969  {
17970  json.WriteString("DEVICE_UNCACHED");
17971  }
17972  json.EndArray();
17973 
17974  if(stats.memoryType[typeIndex].blockCount > 0)
17975  {
17976  json.WriteString("Stats");
17977  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17978  }
17979 
17980  json.EndObject();
17981  }
17982  }
17983 
17984  json.EndObject();
17985  }
17986  if(detailedMap == VK_TRUE)
17987  {
17988  allocator->PrintDetailedMap(json);
17989  }
17990 
17991  json.EndObject();
17992  }
17993 
17994  const size_t len = sb.GetLength();
17995  char* const pChars = vma_new_array(allocator, char, len + 1);
17996  if(len > 0)
17997  {
17998  memcpy(pChars, sb.GetData(), len);
17999  }
18000  pChars[len] = '\0';
18001  *ppStatsString = pChars;
18002 }
18003 
18004 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
18005  VmaAllocator allocator,
18006  char* pStatsString)
18007 {
18008  if(pStatsString != VMA_NULL)
18009  {
18010  VMA_ASSERT(allocator);
18011  size_t len = strlen(pStatsString);
18012  vma_delete_array(allocator, pStatsString, len + 1);
18013  }
18014 }
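
// Usage sketch (illustrative only, not part of the library): every string
// returned by vmaBuildStatsString must be released with vmaFreeStatsString on
// the same allocator. `WriteToLog` stands in for any application-side sink.
static void Example_DumpStatsJson(VmaAllocator allocator, void (*WriteToLog)(const char*))
{
    char* statsJson = nullptr;
    vmaBuildStatsString(allocator, &statsJson, VK_TRUE); // VK_TRUE: include detailed map
    WriteToLog(statsJson);
    vmaFreeStatsString(allocator, statsJson);
}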
18015 
18016 #endif // #if VMA_STATS_STRING_ENABLED
18017 
18018 /*
18019 This function is not protected by any mutex because it just reads immutable data.
18020 */
18021 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
18022  VmaAllocator allocator,
18023  uint32_t memoryTypeBits,
18024  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18025  uint32_t* pMemoryTypeIndex)
18026 {
18027  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18028  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18029  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18030 
18031  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
18032 
18033  if(pAllocationCreateInfo->memoryTypeBits != 0)
18034  {
18035  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
18036  }
18037 
18038  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
18039  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
18040  uint32_t notPreferredFlags = 0;
18041 
18042  // Convert usage to requiredFlags and preferredFlags.
18043  switch(pAllocationCreateInfo->usage)
18044  {
18045  case VMA_MEMORY_USAGE_UNKNOWN:
18046  break;
18047  case VMA_MEMORY_USAGE_GPU_ONLY:
18048  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18049  {
18050  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18051  }
18052  break;
18053  case VMA_MEMORY_USAGE_CPU_ONLY:
18054  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
18055  break;
18056  case VMA_MEMORY_USAGE_CPU_TO_GPU:
18057  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18058  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18059  {
18060  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18061  }
18062  break;
18063  case VMA_MEMORY_USAGE_GPU_TO_CPU:
18064  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18065  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
18066  break;
18067  case VMA_MEMORY_USAGE_CPU_COPY:
18068  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18069  break;
18070  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
18071  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
18072  break;
18073  default:
18074  VMA_ASSERT(0);
18075  break;
18076  }
18077 
18078  // Avoid DEVICE_COHERENT unless explicitly requested.
18079  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
18080  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
18081  {
18082  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
18083  }
18084 
18085  *pMemoryTypeIndex = UINT32_MAX;
18086  uint32_t minCost = UINT32_MAX;
18087  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
18088  memTypeIndex < allocator->GetMemoryTypeCount();
18089  ++memTypeIndex, memTypeBit <<= 1)
18090  {
18091  // This memory type is acceptable according to memoryTypeBits bitmask.
18092  if((memTypeBit & memoryTypeBits) != 0)
18093  {
18094  const VkMemoryPropertyFlags currFlags =
18095  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
18096  // This memory type contains requiredFlags.
18097  if((requiredFlags & ~currFlags) == 0)
18098  {
18099  // Calculate cost as number of bits from preferredFlags not present in this memory type.
18100  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
18101  VmaCountBitsSet(currFlags & notPreferredFlags);
18102  // Remember memory type with lowest cost.
18103  if(currCost < minCost)
18104  {
18105  *pMemoryTypeIndex = memTypeIndex;
18106  if(currCost == 0)
18107  {
18108  return VK_SUCCESS;
18109  }
18110  minCost = currCost;
18111  }
18112  }
18113  }
18114  }
18115  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
18116 }
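
// Usage sketch (illustrative only, not part of the library): using the cost
// model above to pick a memory type for a staging buffer. UINT32_MAX as
// memoryTypeBits means "consider every type"; real callers usually pass
// VkMemoryRequirements::memoryTypeBits instead.
static VkResult Example_FindStagingMemoryType(VmaAllocator allocator, uint32_t* outMemTypeIndex)
{
    VmaAllocationCreateInfo createInfo = {};
    createInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // requires HOST_VISIBLE | HOST_COHERENT
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &createInfo, outMemTypeIndex);
}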
18117 
18118 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
18119  VmaAllocator allocator,
18120  const VkBufferCreateInfo* pBufferCreateInfo,
18121  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18122  uint32_t* pMemoryTypeIndex)
18123 {
18124  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18125  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
18126  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18127  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18128 
18129  const VkDevice hDev = allocator->m_hDevice;
18130  VkBuffer hBuffer = VK_NULL_HANDLE;
18131  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
18132  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
18133  if(res == VK_SUCCESS)
18134  {
18135  VkMemoryRequirements memReq = {};
18136  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
18137  hDev, hBuffer, &memReq);
18138 
18139  res = vmaFindMemoryTypeIndex(
18140  allocator,
18141  memReq.memoryTypeBits,
18142  pAllocationCreateInfo,
18143  pMemoryTypeIndex);
18144 
18145  allocator->GetVulkanFunctions().vkDestroyBuffer(
18146  hDev, hBuffer, allocator->GetAllocationCallbacks());
18147  }
18148  return res;
18149 }
18150 
18151 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
18152  VmaAllocator allocator,
18153  const VkImageCreateInfo* pImageCreateInfo,
18154  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18155  uint32_t* pMemoryTypeIndex)
18156 {
18157  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18158  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
18159  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18160  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18161 
18162  const VkDevice hDev = allocator->m_hDevice;
18163  VkImage hImage = VK_NULL_HANDLE;
18164  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
18165  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
18166  if(res == VK_SUCCESS)
18167  {
18168  VkMemoryRequirements memReq = {};
18169  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
18170  hDev, hImage, &memReq);
18171 
18172  res = vmaFindMemoryTypeIndex(
18173  allocator,
18174  memReq.memoryTypeBits,
18175  pAllocationCreateInfo,
18176  pMemoryTypeIndex);
18177 
18178  allocator->GetVulkanFunctions().vkDestroyImage(
18179  hDev, hImage, allocator->GetAllocationCallbacks());
18180  }
18181  return res;
18182 }
18183 
18184 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
18185  VmaAllocator allocator,
18186  const VmaPoolCreateInfo* pCreateInfo,
18187  VmaPool* pPool)
18188 {
18189  VMA_ASSERT(allocator && pCreateInfo && pPool);
18190 
18191  VMA_DEBUG_LOG("vmaCreatePool");
18192 
18193  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18194 
18195  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
18196 
18197 #if VMA_RECORDING_ENABLED
18198  if(allocator->GetRecorder() != VMA_NULL)
18199  {
18200  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
18201  }
18202 #endif
18203 
18204  return res;
18205 }
18206 
18207 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
18208  VmaAllocator allocator,
18209  VmaPool pool)
18210 {
18211  VMA_ASSERT(allocator);
18212 
18213  if(pool == VK_NULL_HANDLE)
18214  {
18215  return;
18216  }
18217 
18218  VMA_DEBUG_LOG("vmaDestroyPool");
18219 
18220  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18221 
18222 #if VMA_RECORDING_ENABLED
18223  if(allocator->GetRecorder() != VMA_NULL)
18224  {
18225  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
18226  }
18227 #endif
18228 
18229  allocator->DestroyPool(pool);
18230 }
18231 
18232 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
18233  VmaAllocator allocator,
18234  VmaPool pool,
18235  VmaPoolStats* pPoolStats)
18236 {
18237  VMA_ASSERT(allocator && pool && pPoolStats);
18238 
18239  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18240 
18241  allocator->GetPoolStats(pool, pPoolStats);
18242 }
18243 
18244 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
18245  VmaAllocator allocator,
18246  VmaPool pool,
18247  size_t* pLostAllocationCount)
18248 {
18249  VMA_ASSERT(allocator && pool);
18250 
18251  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18252 
18253 #if VMA_RECORDING_ENABLED
18254  if(allocator->GetRecorder() != VMA_NULL)
18255  {
18256  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
18257  }
18258 #endif
18259 
18260  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18261 }
18262 
18263 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
18264 {
18265  VMA_ASSERT(allocator && pool);
18266 
18267  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18268 
18269  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
18270 
18271  return allocator->CheckPoolCorruption(pool);
18272 }
18273 
18274 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
18275  VmaAllocator allocator,
18276  VmaPool pool,
18277  const char** ppName)
18278 {
18279  VMA_ASSERT(allocator && pool && ppName);
18280 
18281  VMA_DEBUG_LOG("vmaGetPoolName");
18282 
18283  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18284 
18285  *ppName = pool->GetName();
18286 }
18287 
18288 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
18289  VmaAllocator allocator,
18290  VmaPool pool,
18291  const char* pName)
18292 {
18293  VMA_ASSERT(allocator && pool);
18294 
18295  VMA_DEBUG_LOG("vmaSetPoolName");
18296 
18297  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18298 
18299  pool->SetName(pName);
18300 
18301 #if VMA_RECORDING_ENABLED
18302  if(allocator->GetRecorder() != VMA_NULL)
18303  {
18304  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18305  }
18306 #endif
18307 }
18308 
18309 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18310  VmaAllocator allocator,
18311  const VkMemoryRequirements* pVkMemoryRequirements,
18312  const VmaAllocationCreateInfo* pCreateInfo,
18313  VmaAllocation* pAllocation,
18314  VmaAllocationInfo* pAllocationInfo)
18315 {
18316  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18317 
18318  VMA_DEBUG_LOG("vmaAllocateMemory");
18319 
18320  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18321 
18322  VkResult result = allocator->AllocateMemory(
18323  *pVkMemoryRequirements,
18324  false, // requiresDedicatedAllocation
18325  false, // prefersDedicatedAllocation
18326  VK_NULL_HANDLE, // dedicatedBuffer
18327  UINT32_MAX, // dedicatedBufferUsage
18328  VK_NULL_HANDLE, // dedicatedImage
18329  *pCreateInfo,
18330  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18331  1, // allocationCount
18332  pAllocation);
18333 
18334 #if VMA_RECORDING_ENABLED
18335  if(allocator->GetRecorder() != VMA_NULL)
18336  {
18337  allocator->GetRecorder()->RecordAllocateMemory(
18338  allocator->GetCurrentFrameIndex(),
18339  *pVkMemoryRequirements,
18340  *pCreateInfo,
18341  *pAllocation);
18342  }
18343 #endif
18344 
18345  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18346  {
18347  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18348  }
18349 
18350  return result;
18351 }
18352 
18353 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18354  VmaAllocator allocator,
18355  const VkMemoryRequirements* pVkMemoryRequirements,
18356  const VmaAllocationCreateInfo* pCreateInfo,
18357  size_t allocationCount,
18358  VmaAllocation* pAllocations,
18359  VmaAllocationInfo* pAllocationInfo)
18360 {
18361  if(allocationCount == 0)
18362  {
18363  return VK_SUCCESS;
18364  }
18365 
18366  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18367 
18368  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18369 
18370  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18371 
18372  VkResult result = allocator->AllocateMemory(
18373  *pVkMemoryRequirements,
18374  false, // requiresDedicatedAllocation
18375  false, // prefersDedicatedAllocation
18376  VK_NULL_HANDLE, // dedicatedBuffer
18377  UINT32_MAX, // dedicatedBufferUsage
18378  VK_NULL_HANDLE, // dedicatedImage
18379  *pCreateInfo,
18380  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18381  allocationCount,
18382  pAllocations);
18383 
18384 #if VMA_RECORDING_ENABLED
18385  if(allocator->GetRecorder() != VMA_NULL)
18386  {
18387  allocator->GetRecorder()->RecordAllocateMemoryPages(
18388  allocator->GetCurrentFrameIndex(),
18389  *pVkMemoryRequirements,
18390  *pCreateInfo,
18391  (uint64_t)allocationCount,
18392  pAllocations);
18393  }
18394 #endif
18395 
18396  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18397  {
18398  for(size_t i = 0; i < allocationCount; ++i)
18399  {
18400  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18401  }
18402  }
18403 
18404  return result;
18405 }
18406 
18407 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18408  VmaAllocator allocator,
18409  VkBuffer buffer,
18410  const VmaAllocationCreateInfo* pCreateInfo,
18411  VmaAllocation* pAllocation,
18412  VmaAllocationInfo* pAllocationInfo)
18413 {
18414  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18415 
18416  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18417 
18418  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18419 
18420  VkMemoryRequirements vkMemReq = {};
18421  bool requiresDedicatedAllocation = false;
18422  bool prefersDedicatedAllocation = false;
18423  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18424  requiresDedicatedAllocation,
18425  prefersDedicatedAllocation);
18426 
18427  VkResult result = allocator->AllocateMemory(
18428  vkMemReq,
18429  requiresDedicatedAllocation,
18430  prefersDedicatedAllocation,
18431  buffer, // dedicatedBuffer
18432  UINT32_MAX, // dedicatedBufferUsage
18433  VK_NULL_HANDLE, // dedicatedImage
18434  *pCreateInfo,
18435  VMA_SUBALLOCATION_TYPE_BUFFER,
18436  1, // allocationCount
18437  pAllocation);
18438 
18439 #if VMA_RECORDING_ENABLED
18440  if(allocator->GetRecorder() != VMA_NULL)
18441  {
18442  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18443  allocator->GetCurrentFrameIndex(),
18444  vkMemReq,
18445  requiresDedicatedAllocation,
18446  prefersDedicatedAllocation,
18447  *pCreateInfo,
18448  *pAllocation);
18449  }
18450 #endif
18451 
18452  if(pAllocationInfo && result == VK_SUCCESS)
18453  {
18454  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18455  }
18456 
18457  return result;
18458 }
18459 
18460 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18461  VmaAllocator allocator,
18462  VkImage image,
18463  const VmaAllocationCreateInfo* pCreateInfo,
18464  VmaAllocation* pAllocation,
18465  VmaAllocationInfo* pAllocationInfo)
18466 {
18467  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18468 
18469  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18470 
18471  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18472 
18473  VkMemoryRequirements vkMemReq = {};
18474  bool requiresDedicatedAllocation = false;
18475  bool prefersDedicatedAllocation = false;
18476  allocator->GetImageMemoryRequirements(image, vkMemReq,
18477  requiresDedicatedAllocation, prefersDedicatedAllocation);
18478 
18479  VkResult result = allocator->AllocateMemory(
18480  vkMemReq,
18481  requiresDedicatedAllocation,
18482  prefersDedicatedAllocation,
18483  VK_NULL_HANDLE, // dedicatedBuffer
18484  UINT32_MAX, // dedicatedBufferUsage
18485  image, // dedicatedImage
18486  *pCreateInfo,
18487  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18488  1, // allocationCount
18489  pAllocation);
18490 
18491 #if VMA_RECORDING_ENABLED
18492  if(allocator->GetRecorder() != VMA_NULL)
18493  {
18494  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18495  allocator->GetCurrentFrameIndex(),
18496  vkMemReq,
18497  requiresDedicatedAllocation,
18498  prefersDedicatedAllocation,
18499  *pCreateInfo,
18500  *pAllocation);
18501  }
18502 #endif
18503 
18504  if(pAllocationInfo && result == VK_SUCCESS)
18505  {
18506  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18507  }
18508 
18509  return result;
18510 }
18511 
18512 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18513  VmaAllocator allocator,
18514  VmaAllocation allocation)
18515 {
18516  VMA_ASSERT(allocator);
18517 
18518  if(allocation == VK_NULL_HANDLE)
18519  {
18520  return;
18521  }
18522 
18523  VMA_DEBUG_LOG("vmaFreeMemory");
18524 
18525  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18526 
18527 #if VMA_RECORDING_ENABLED
18528  if(allocator->GetRecorder() != VMA_NULL)
18529  {
18530  allocator->GetRecorder()->RecordFreeMemory(
18531  allocator->GetCurrentFrameIndex(),
18532  allocation);
18533  }
18534 #endif
18535 
18536  allocator->FreeMemory(
18537  1, // allocationCount
18538  &allocation);
18539 }
18540 
18541 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18542  VmaAllocator allocator,
18543  size_t allocationCount,
18544  const VmaAllocation* pAllocations)
18545 {
18546  if(allocationCount == 0)
18547  {
18548  return;
18549  }
18550 
18551  VMA_ASSERT(allocator);
18552 
18553  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18554 
18555  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18556 
18557 #if VMA_RECORDING_ENABLED
18558  if(allocator->GetRecorder() != VMA_NULL)
18559  {
18560  allocator->GetRecorder()->RecordFreeMemoryPages(
18561  allocator->GetCurrentFrameIndex(),
18562  (uint64_t)allocationCount,
18563  pAllocations);
18564  }
18565 #endif
18566 
18567  allocator->FreeMemory(allocationCount, pAllocations);
18568 }
18569 
18570 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
18571  VmaAllocator allocator,
18572  VmaAllocation allocation,
18573  VkDeviceSize newSize)
18574 {
18575  VMA_ASSERT(allocator && allocation);
18576 
18577  VMA_DEBUG_LOG("vmaResizeAllocation");
18578 
18579  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18580 
18581  return allocator->ResizeAllocation(allocation, newSize);
18582 }
18583 
18584 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18585  VmaAllocator allocator,
18586  VmaAllocation allocation,
18587  VmaAllocationInfo* pAllocationInfo)
18588 {
18589  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18590 
18591  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18592 
18593 #if VMA_RECORDING_ENABLED
18594  if(allocator->GetRecorder() != VMA_NULL)
18595  {
18596  allocator->GetRecorder()->RecordGetAllocationInfo(
18597  allocator->GetCurrentFrameIndex(),
18598  allocation);
18599  }
18600 #endif
18601 
18602  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18603 }
18604 
18605 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18606  VmaAllocator allocator,
18607  VmaAllocation allocation)
18608 {
18609  VMA_ASSERT(allocator && allocation);
18610 
18611  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18612 
18613 #if VMA_RECORDING_ENABLED
18614  if(allocator->GetRecorder() != VMA_NULL)
18615  {
18616  allocator->GetRecorder()->RecordTouchAllocation(
18617  allocator->GetCurrentFrameIndex(),
18618  allocation);
18619  }
18620 #endif
18621 
18622  return allocator->TouchAllocation(allocation);
18623 }
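/*
Editor's sketch (illustrative, not part of the original file): an allocation
created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT may become lost between
frames. vmaTouchAllocation() atomically checks this and marks the allocation
as used in the current frame:

    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Allocation is lost: destroy the dependent resource and recreate it.
    }
*/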
18624 
18625 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
18626  VmaAllocator allocator,
18627  VmaAllocation allocation,
18628  void* pUserData)
18629 {
18630  VMA_ASSERT(allocator && allocation);
18631 
18632  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18633 
18634  allocation->SetUserData(allocator, pUserData);
18635 
18636 #if VMA_RECORDING_ENABLED
18637  if(allocator->GetRecorder() != VMA_NULL)
18638  {
18639  allocator->GetRecorder()->RecordSetAllocationUserData(
18640  allocator->GetCurrentFrameIndex(),
18641  allocation,
18642  pUserData);
18643  }
18644 #endif
18645 }
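/*
Editor's sketch (illustrative, not part of the original file): pUserData is an
opaque, application-defined pointer stored inside the allocation and readable
back through VmaAllocationInfo. `myTexture` is a hypothetical application
object:

    vmaSetAllocationUserData(allocator, alloc, myTexture);

    VmaAllocationInfo info;
    vmaGetAllocationInfo(allocator, alloc, &info);
    // info.pUserData == myTexture
*/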
18646 
18647 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
18648  VmaAllocator allocator,
18649  VmaAllocation* pAllocation)
18650 {
18651  VMA_ASSERT(allocator && pAllocation);
18652 
18653  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18654 
18655  allocator->CreateLostAllocation(pAllocation);
18656 
18657 #if VMA_RECORDING_ENABLED
18658  if(allocator->GetRecorder() != VMA_NULL)
18659  {
18660  allocator->GetRecorder()->RecordCreateLostAllocation(
18661  allocator->GetCurrentFrameIndex(),
18662  *pAllocation);
18663  }
18664 #endif
18665 }
18666 
18667 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
18668  VmaAllocator allocator,
18669  VmaAllocation allocation,
18670  void** ppData)
18671 {
18672  VMA_ASSERT(allocator && allocation && ppData);
18673 
18674  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18675 
18676  VkResult res = allocator->Map(allocation, ppData);
18677 
18678 #if VMA_RECORDING_ENABLED
18679  if(allocator->GetRecorder() != VMA_NULL)
18680  {
18681  allocator->GetRecorder()->RecordMapMemory(
18682  allocator->GetCurrentFrameIndex(),
18683  allocation);
18684  }
18685 #endif
18686 
18687  return res;
18688 }
18689 
18690 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
18691  VmaAllocator allocator,
18692  VmaAllocation allocation)
18693 {
18694  VMA_ASSERT(allocator && allocation);
18695 
18696  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18697 
18698 #if VMA_RECORDING_ENABLED
18699  if(allocator->GetRecorder() != VMA_NULL)
18700  {
18701  allocator->GetRecorder()->RecordUnmapMemory(
18702  allocator->GetCurrentFrameIndex(),
18703  allocation);
18704  }
18705 #endif
18706 
18707  allocator->Unmap(allocation);
18708 }
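/*
Editor's sketch (illustrative, not part of the original file): the canonical
map / write / unmap sequence for a host-visible allocation. `srcData` and
`dataSize` are assumed to exist:

    void* mapped = NULL;
    VkResult res = vmaMapMemory(allocator, alloc, &mapped);
    if(res == VK_SUCCESS)
    {
        memcpy(mapped, srcData, (size_t)dataSize);
        vmaUnmapMemory(allocator, alloc);
    }

Map/unmap calls are reference-counted internally, so nesting them on the same
allocation is valid as long as every map is matched by an unmap.
*/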
18709 
18710 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18711 {
18712  VMA_ASSERT(allocator && allocation);
18713 
18714  VMA_DEBUG_LOG("vmaFlushAllocation");
18715 
18716  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18717 
18718  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
18719 
18720 #if VMA_RECORDING_ENABLED
18721  if(allocator->GetRecorder() != VMA_NULL)
18722  {
18723  allocator->GetRecorder()->RecordFlushAllocation(
18724  allocator->GetCurrentFrameIndex(),
18725  allocation, offset, size);
18726  }
18727 #endif
18728 
18729  return res;
18730 }
18731 
18732 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18733 {
18734  VMA_ASSERT(allocator && allocation);
18735 
18736  VMA_DEBUG_LOG("vmaInvalidateAllocation");
18737 
18738  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18739 
18740  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
18741 
18742 #if VMA_RECORDING_ENABLED
18743  if(allocator->GetRecorder() != VMA_NULL)
18744  {
18745  allocator->GetRecorder()->RecordInvalidateAllocation(
18746  allocator->GetCurrentFrameIndex(),
18747  allocation, offset, size);
18748  }
18749 #endif
18750 
18751  return res;
18752 }
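/*
Editor's sketch (illustrative, not part of the original file): on memory types
without VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, CPU writes must be flushed and
CPU reads preceded by an invalidate:

    memcpy(mapped, srcData, (size_t)dataSize);
    vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);      // after a CPU write

    vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); // before a CPU read

Both calls are skipped internally on HOST_COHERENT memory, so calling them
unconditionally is safe.
*/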
18753 
18754 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
18755  VmaAllocator allocator,
18756  uint32_t allocationCount,
18757  const VmaAllocation* allocations,
18758  const VkDeviceSize* offsets,
18759  const VkDeviceSize* sizes)
18760 {
18761  VMA_ASSERT(allocator);
18762 
18763  if(allocationCount == 0)
18764  {
18765  return VK_SUCCESS;
18766  }
18767 
18768  VMA_ASSERT(allocations);
18769 
18770  VMA_DEBUG_LOG("vmaFlushAllocations");
18771 
18772  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18773 
18774  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
18775 
18776 #if VMA_RECORDING_ENABLED
18777  if(allocator->GetRecorder() != VMA_NULL)
18778  {
18779  // TODO: recording of this call is not implemented yet.
18780  }
18781 #endif
18782 
18783  return res;
18784 }
18785 
18786 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
18787  VmaAllocator allocator,
18788  uint32_t allocationCount,
18789  const VmaAllocation* allocations,
18790  const VkDeviceSize* offsets,
18791  const VkDeviceSize* sizes)
18792 {
18793  VMA_ASSERT(allocator);
18794 
18795  if(allocationCount == 0)
18796  {
18797  return VK_SUCCESS;
18798  }
18799 
18800  VMA_ASSERT(allocations);
18801 
18802  VMA_DEBUG_LOG("vmaInvalidateAllocations");
18803 
18804  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18805 
18806  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
18807 
18808 #if VMA_RECORDING_ENABLED
18809  if(allocator->GetRecorder() != VMA_NULL)
18810  {
18811  // TODO: recording of this call is not implemented yet.
18812  }
18813 #endif
18814 
18815  return res;
18816 }
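/*
Editor's sketch (illustrative, not part of the original file): the batch
variants flush or invalidate several allocations with a single call. `a0` and
`a1` are assumed to be valid VmaAllocation handles:

    VmaAllocation allocs[2]  = { a0, a1 };
    VkDeviceSize  offsets[2] = { 0, 0 };
    VkDeviceSize  sizes[2]   = { VK_WHOLE_SIZE, VK_WHOLE_SIZE };
    vmaFlushAllocations(allocator, 2, allocs, offsets, sizes);
*/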
18817 
18818 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
18819 {
18820  VMA_ASSERT(allocator);
18821 
18822  VMA_DEBUG_LOG("vmaCheckCorruption");
18823 
18824  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18825 
18826  return allocator->CheckCorruption(memoryTypeBits);
18827 }
18828 
18829 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
18830  VmaAllocator allocator,
18831  const VmaAllocation* pAllocations,
18832  size_t allocationCount,
18833  VkBool32* pAllocationsChanged,
18834  const VmaDefragmentationInfo *pDefragmentationInfo,
18835  VmaDefragmentationStats* pDefragmentationStats)
18836 {
18837  // Deprecated interface, reimplemented using new one.
18838 
18839  VmaDefragmentationInfo2 info2 = {};
18840  info2.allocationCount = (uint32_t)allocationCount;
18841  info2.pAllocations = pAllocations;
18842  info2.pAllocationsChanged = pAllocationsChanged;
18843  if(pDefragmentationInfo != VMA_NULL)
18844  {
18845  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
18846  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
18847  }
18848  else
18849  {
18850  info2.maxCpuAllocationsToMove = UINT32_MAX;
18851  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18852  }
18853  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18854 
18855  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
18856  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18857  if(res == VK_NOT_READY)
18858  {
18859  res = vmaDefragmentationEnd(allocator, ctx);
18860  }
18861  return res;
18862 }
18863 
18864 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
18865  VmaAllocator allocator,
18866  const VmaDefragmentationInfo2* pInfo,
18867  VmaDefragmentationStats* pStats,
18868  VmaDefragmentationContext *pContext)
18869 {
18870  VMA_ASSERT(allocator && pInfo && pContext);
18871 
18872  // Degenerate case: Nothing to defragment.
18873  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
18874  {
18875  return VK_SUCCESS;
18876  }
18877 
18878  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
18879  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
18880  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
18881  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18882 
18883  VMA_DEBUG_LOG("vmaDefragmentationBegin");
18884 
18885  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18886 
18887  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
18888 
18889 #if VMA_RECORDING_ENABLED
18890  if(allocator->GetRecorder() != VMA_NULL)
18891  {
18892  allocator->GetRecorder()->RecordDefragmentationBegin(
18893  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
18894  }
18895 #endif
18896 
18897  return res;
18898 }
18899 
18900 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
18901  VmaAllocator allocator,
18902  VmaDefragmentationContext context)
18903 {
18904  VMA_ASSERT(allocator);
18905 
18906  VMA_DEBUG_LOG("vmaDefragmentationEnd");
18907 
18908  if(context != VK_NULL_HANDLE)
18909  {
18910  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18911 
18912 #if VMA_RECORDING_ENABLED
18913  if(allocator->GetRecorder() != VMA_NULL)
18914  {
18915  allocator->GetRecorder()->RecordDefragmentationEnd(
18916  allocator->GetCurrentFrameIndex(), context);
18917  }
18918 #endif
18919 
18920  return allocator->DefragmentationEnd(context);
18921  }
18922  else
18923  {
18924  return VK_SUCCESS;
18925  }
18926 }
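/*
Editor's sketch (illustrative, not part of the original file): CPU-side
defragmentation using the current begin/end API, mirroring what the deprecated
vmaDefragment() above does internally. `allocs` and `allocCount` are assumed
to exist:

    VmaDefragmentationInfo2 info = {};
    info.allocationCount = (uint32_t)allocCount;
    info.pAllocations = allocs;
    info.maxCpuBytesToMove = VK_WHOLE_SIZE;
    info.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &info, NULL, &ctx);
    if(res == VK_NOT_READY)
        res = vmaDefragmentationEnd(allocator, ctx);

Buffers or images bound to allocations that were moved must afterwards be
destroyed, recreated, and rebound by the application.
*/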
18927 
18928 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18929  VmaAllocator allocator,
18930  VmaDefragmentationContext context,
18931  VmaDefragmentationPassInfo* pInfo
18932  )
18933 {
18934  VMA_ASSERT(allocator);
18935  VMA_ASSERT(pInfo);
18936 
18937  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18938 
18939  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18940 
18941  if(context == VK_NULL_HANDLE)
18942  {
18943  pInfo->moveCount = 0;
18944  return VK_SUCCESS;
18945  }
18946 
18947  return allocator->DefragmentationPassBegin(pInfo, context);
18948 }
18949 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18950  VmaAllocator allocator,
18951  VmaDefragmentationContext context)
18952 {
18953  VMA_ASSERT(allocator);
18954 
18955  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18956  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18957 
18958  if(context == VK_NULL_HANDLE)
18959  return VK_SUCCESS;
18960 
18961  return allocator->DefragmentationPassEnd(context);
18962 }
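/*
Editor's sketch (illustrative, not part of the original file): with
VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set in VmaDefragmentationInfo2::flags,
defragmentation can be driven in application-controlled passes. The exact loop
shape below (stopping on VK_SUCCESS) is an assumption; consult the library
documentation for the full pass protocol:

    VmaDefragmentationPassInfo pass = { 0 }; // moveCount / pMoves per the docs
    for(;;)
    {
        VkResult res = vmaBeginDefragmentationPass(allocator, ctx, &pass);
        if(res == VK_SUCCESS)
            break; // nothing left to move
        // ... perform the copies described by pass.pMoves[0..pass.moveCount) ...
        if(vmaEndDefragmentationPass(allocator, ctx) == VK_SUCCESS)
            break;
    }
    vmaDefragmentationEnd(allocator, ctx);
*/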
18963 
18964 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18965  VmaAllocator allocator,
18966  VmaAllocation allocation,
18967  VkBuffer buffer)
18968 {
18969  VMA_ASSERT(allocator && allocation && buffer);
18970 
18971  VMA_DEBUG_LOG("vmaBindBufferMemory");
18972 
18973  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18974 
18975  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18976 }
18977 
18978 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18979  VmaAllocator allocator,
18980  VmaAllocation allocation,
18981  VkDeviceSize allocationLocalOffset,
18982  VkBuffer buffer,
18983  const void* pNext)
18984 {
18985  VMA_ASSERT(allocator && allocation && buffer);
18986 
18987  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18988 
18989  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18990 
18991  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18992 }
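/*
Editor's sketch (illustrative, not part of the original file): manual binding
is mainly useful together with VMA_ALLOCATION_CREATE_DONT_BIND_BIT, which makes
vmaCreateBuffer() allocate memory without binding it:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DONT_BIND_BIT;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);

    // Later, once the application decides to bind:
    vmaBindBufferMemory(allocator, alloc, buf);

vmaBindBufferMemory2() additionally accepts an allocation-local offset and a
pNext chain; a non-null pNext requires the VK_KHR_bind_memory2 extension
(VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) or Vulkan 1.1.
*/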
18993 
18994 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18995  VmaAllocator allocator,
18996  VmaAllocation allocation,
18997  VkImage image)
18998 {
18999  VMA_ASSERT(allocator && allocation && image);
19000 
19001  VMA_DEBUG_LOG("vmaBindImageMemory");
19002 
19003  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19004 
19005  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
19006 }
19007 
19008 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
19009  VmaAllocator allocator,
19010  VmaAllocation allocation,
19011  VkDeviceSize allocationLocalOffset,
19012  VkImage image,
19013  const void* pNext)
19014 {
19015  VMA_ASSERT(allocator && allocation && image);
19016 
19017  VMA_DEBUG_LOG("vmaBindImageMemory2");
19018 
19019  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19020 
19021  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
19022 }
19023 
19024 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
19025  VmaAllocator allocator,
19026  const VkBufferCreateInfo* pBufferCreateInfo,
19027  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19028  VkBuffer* pBuffer,
19029  VmaAllocation* pAllocation,
19030  VmaAllocationInfo* pAllocationInfo)
19031 {
19032  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
19033 
19034  if(pBufferCreateInfo->size == 0)
19035  {
19036  return VK_ERROR_VALIDATION_FAILED_EXT;
19037  }
19038  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
19039  !allocator->m_UseKhrBufferDeviceAddress)
19040  {
19041  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
19042  return VK_ERROR_VALIDATION_FAILED_EXT;
19043  }
19044 
19045  VMA_DEBUG_LOG("vmaCreateBuffer");
19046 
19047  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19048 
19049  *pBuffer = VK_NULL_HANDLE;
19050  *pAllocation = VK_NULL_HANDLE;
19051 
19052  // 1. Create VkBuffer.
19053  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
19054  allocator->m_hDevice,
19055  pBufferCreateInfo,
19056  allocator->GetAllocationCallbacks(),
19057  pBuffer);
19058  if(res >= 0)
19059  {
19060  // 2. vkGetBufferMemoryRequirements.
19061  VkMemoryRequirements vkMemReq = {};
19062  bool requiresDedicatedAllocation = false;
19063  bool prefersDedicatedAllocation = false;
19064  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
19065  requiresDedicatedAllocation, prefersDedicatedAllocation);
19066 
19067  // 3. Allocate memory using allocator.
19068  res = allocator->AllocateMemory(
19069  vkMemReq,
19070  requiresDedicatedAllocation,
19071  prefersDedicatedAllocation,
19072  *pBuffer, // dedicatedBuffer
19073  pBufferCreateInfo->usage, // dedicatedBufferUsage
19074  VK_NULL_HANDLE, // dedicatedImage
19075  *pAllocationCreateInfo,
19076  VMA_SUBALLOCATION_TYPE_BUFFER,
19077  1, // allocationCount
19078  pAllocation);
19079 
19080 #if VMA_RECORDING_ENABLED
19081  if(allocator->GetRecorder() != VMA_NULL)
19082  {
19083  allocator->GetRecorder()->RecordCreateBuffer(
19084  allocator->GetCurrentFrameIndex(),
19085  *pBufferCreateInfo,
19086  *pAllocationCreateInfo,
19087  *pAllocation);
19088  }
19089 #endif
19090 
19091  if(res >= 0)
19092  {
19093  // 4. Bind buffer with memory.
19094  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19095  {
19096  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
19097  }
19098  if(res >= 0)
19099  {
19100  // All steps succeeded.
19101  #if VMA_STATS_STRING_ENABLED
19102  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19103  #endif
19104  if(pAllocationInfo != VMA_NULL)
19105  {
19106  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19107  }
19108 
19109  return VK_SUCCESS;
19110  }
19111  allocator->FreeMemory(
19112  1, // allocationCount
19113  pAllocation);
19114  *pAllocation = VK_NULL_HANDLE;
19115  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19116  *pBuffer = VK_NULL_HANDLE;
19117  return res;
19118  }
19119  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19120  *pBuffer = VK_NULL_HANDLE;
19121  return res;
19122  }
19123  return res;
19124 }
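/*
Editor's sketch (illustrative, not part of the original file): the most common
way to use the library, creating a buffer and its memory in one call.
`allocator` is assumed to be a valid VmaAllocator:

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
        VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buf, &alloc, NULL);
    // ... use buf ...
    vmaDestroyBuffer(allocator, buf, alloc); // destroys buffer and frees memory
*/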
19125 
19126 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
19127  VmaAllocator allocator,
19128  VkBuffer buffer,
19129  VmaAllocation allocation)
19130 {
19131  VMA_ASSERT(allocator);
19132 
19133  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19134  {
19135  return;
19136  }
19137 
19138  VMA_DEBUG_LOG("vmaDestroyBuffer");
19139 
19140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19141 
19142 #if VMA_RECORDING_ENABLED
19143  if(allocator->GetRecorder() != VMA_NULL)
19144  {
19145  allocator->GetRecorder()->RecordDestroyBuffer(
19146  allocator->GetCurrentFrameIndex(),
19147  allocation);
19148  }
19149 #endif
19150 
19151  if(buffer != VK_NULL_HANDLE)
19152  {
19153  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
19154  }
19155 
19156  if(allocation != VK_NULL_HANDLE)
19157  {
19158  allocator->FreeMemory(
19159  1, // allocationCount
19160  &allocation);
19161  }
19162 }
19163 
19164 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
19165  VmaAllocator allocator,
19166  const VkImageCreateInfo* pImageCreateInfo,
19167  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19168  VkImage* pImage,
19169  VmaAllocation* pAllocation,
19170  VmaAllocationInfo* pAllocationInfo)
19171 {
19172  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
19173 
19174  if(pImageCreateInfo->extent.width == 0 ||
19175  pImageCreateInfo->extent.height == 0 ||
19176  pImageCreateInfo->extent.depth == 0 ||
19177  pImageCreateInfo->mipLevels == 0 ||
19178  pImageCreateInfo->arrayLayers == 0)
19179  {
19180  return VK_ERROR_VALIDATION_FAILED_EXT;
19181  }
19182 
19183  VMA_DEBUG_LOG("vmaCreateImage");
19184 
19185  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19186 
19187  *pImage = VK_NULL_HANDLE;
19188  *pAllocation = VK_NULL_HANDLE;
19189 
19190  // 1. Create VkImage.
19191  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
19192  allocator->m_hDevice,
19193  pImageCreateInfo,
19194  allocator->GetAllocationCallbacks(),
19195  pImage);
19196  if(res >= 0)
19197  {
19198  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
19199  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
19200  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
19201 
19202  // 2. Allocate memory using allocator.
19203  VkMemoryRequirements vkMemReq = {};
19204  bool requiresDedicatedAllocation = false;
19205  bool prefersDedicatedAllocation = false;
19206  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
19207  requiresDedicatedAllocation, prefersDedicatedAllocation);
19208 
19209  res = allocator->AllocateMemory(
19210  vkMemReq,
19211  requiresDedicatedAllocation,
19212  prefersDedicatedAllocation,
19213  VK_NULL_HANDLE, // dedicatedBuffer
19214  UINT32_MAX, // dedicatedBufferUsage
19215  *pImage, // dedicatedImage
19216  *pAllocationCreateInfo,
19217  suballocType,
19218  1, // allocationCount
19219  pAllocation);
19220 
19221 #if VMA_RECORDING_ENABLED
19222  if(allocator->GetRecorder() != VMA_NULL)
19223  {
19224  allocator->GetRecorder()->RecordCreateImage(
19225  allocator->GetCurrentFrameIndex(),
19226  *pImageCreateInfo,
19227  *pAllocationCreateInfo,
19228  *pAllocation);
19229  }
19230 #endif
19231 
19232  if(res >= 0)
19233  {
19234  // 3. Bind image with memory.
19235  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19236  {
19237  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
19238  }
19239  if(res >= 0)
19240  {
19241  // All steps succeeded.
19242  #if VMA_STATS_STRING_ENABLED
19243  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
19244  #endif
19245  if(pAllocationInfo != VMA_NULL)
19246  {
19247  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19248  }
19249 
19250  return VK_SUCCESS;
19251  }
19252  allocator->FreeMemory(
19253  1, // allocationCount
19254  pAllocation);
19255  *pAllocation = VK_NULL_HANDLE;
19256  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19257  *pImage = VK_NULL_HANDLE;
19258  return res;
19259  }
19260  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19261  *pImage = VK_NULL_HANDLE;
19262  return res;
19263  }
19264  return res;
19265 }
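/*
Editor's sketch (illustrative, not part of the original file): creating a
sampled 2D image with device-local memory in one call:

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.extent.width = 1024;
    imgCreateInfo.extent.height = 1024;
    imgCreateInfo.extent.depth = 1;
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage image = VK_NULL_HANDLE;
    VmaAllocation alloc = VK_NULL_HANDLE;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
        &image, &alloc, NULL);
    // ... use image ...
    vmaDestroyImage(allocator, image, alloc); // destroys image and frees memory

Note the guard at the top of vmaCreateImage(): a zero extent, mipLevels, or
arrayLayers is rejected early with VK_ERROR_VALIDATION_FAILED_EXT.
*/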
19266 
19267 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
19268  VmaAllocator allocator,
19269  VkImage image,
19270  VmaAllocation allocation)
19271 {
19272  VMA_ASSERT(allocator);
19273 
19274  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19275  {
19276  return;
19277  }
19278 
19279  VMA_DEBUG_LOG("vmaDestroyImage");
19280 
19281  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19282 
19283 #if VMA_RECORDING_ENABLED
19284  if(allocator->GetRecorder() != VMA_NULL)
19285  {
19286  allocator->GetRecorder()->RecordDestroyImage(
19287  allocator->GetCurrentFrameIndex(),
19288  allocation);
19289  }
19290 #endif
19291 
19292  if(image != VK_NULL_HANDLE)
19293  {
19294  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
19295  }
19296  if(allocation != VK_NULL_HANDLE)
19297  {
19298  allocator->FreeMemory(
19299  1, // allocationCount
19300  &allocation);
19301  }
19302 }
19303 
19304 #endif // #ifdef VMA_IMPLEMENTATION