//
// Copyright (c) 2017-2021 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
    #define NOMINMAX // For windows.h
#endif

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

// Define this macro to declare the maximum supported Vulkan version in the format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use a version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif
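
// For example, 1002000 encodes Vulkan 1.2. To limit VMA to Vulkan 1.1 even
// when compiling against newer headers, define the macro before including
// this file (a usage sketch, not part of the original header):
//
//     #define VMA_VULKAN_VERSION 1001000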

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
#if !defined(VMA_MEMORY_PRIORITY)
    #if VK_EXT_memory_priority
        #define VMA_MEMORY_PRIORITY 1
    #else
        #define VMA_MEMORY_PRIORITY 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to if they are not null.
//
// The length may be one of
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
//   this means the number of memory heaps available in the device associated
//   with the VmaAllocator being dealt with.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers then we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;


typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;

typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* VMA_NOT_NULL pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;

    VkPhysicalDevice VMA_NOT_NULL physicalDevice;

    VkDevice VMA_NOT_NULL device;

    VkDeviceSize preferredLargeHeapBlockSize;

    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;

    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;

    uint32_t frameInUseCount;

    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;

    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;

    const VmaRecordSettings* VMA_NULLABLE pRecordSettings;

    VkInstance VMA_NOT_NULL instance;

    uint32_t vulkanApiVersion;
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator VMA_NULLABLE allocator);

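/*
Example: a minimal sketch of creating and destroying an allocator, mirroring
the library's quick-start documentation (instance, physicalDevice and device
are assumed to have been created by the application; error handling omitted):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2; // assumes Vulkan 1.2 headers
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
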
typedef struct VmaAllocatorInfo
{
    VkInstance VMA_NOT_NULL instance;
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaStats* VMA_NOT_NULL pStats);

typedef struct VmaBudget
{
    VkDeviceSize blockBytes;

    VkDeviceSize allocationBytes;

    VkDeviceSize usage;

    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaBudget* VMA_NOT_NULL pBudget);

#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE pStatsString);

#endif // #if VMA_STATS_STRING_ENABLED

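/*
Example: a usage sketch of dumping the allocator's internal state as a JSON
string (assumes a valid `allocator`; the string must be freed with
vmaFreeStatsString, never with free()):

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    printf("%s\n", statsString);
    vmaFreeStatsString(allocator, statsString);
*/
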
VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool VMA_NULLABLE pool;
    void* VMA_NULLABLE pUserData;
    float priority;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

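/*
Example: a sketch of querying a memory type index for a host-visible uniform
buffer (values are illustrative; error handling omitted):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &bufCreateInfo, &allocCreateInfo, &memTypeIndex);
*/
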
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
    float priority;
    VkDeviceSize minAllocationAlignment;
    void* VMA_NULLABLE pMemoryAllocateNext;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NULLABLE pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    VmaPoolStats* VMA_NOT_NULL pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    size_t* VMA_NULLABLE pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE * VMA_NOT_NULL ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE pName);

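/*
Example: a sketch of creating a custom pool for the memory type found above
(block size and count are illustrative):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocate from it by setting VmaAllocationCreateInfo::pool, then:
    vmaDestroyPool(allocator, pool);
*/
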
VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* VMA_NULLABLE pMappedData;
    void* VMA_NULLABLE pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
    size_t allocationCount,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    size_t allocationCount,
    const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE * VMA_NOT_NULL ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

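/*
Example: a sketch of uploading data through a temporary mapping. Each
vmaMapMemory must be balanced by vmaUnmapMemory; mappings are reference-counted
internally (`allocation` is assumed to live in HOST_VISIBLE memory):

    void* mappedData;
    vmaMapMemory(allocator, allocation, &mappedData);
    memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
    vmaUnmapMemory(allocator, allocation);
    // For non-HOST_COHERENT memory types, follow the write with vmaFlushAllocation().
*/
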
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
    uint32_t poolCount;
    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer VMA_NULLABLE commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation VMA_NOT_NULL allocation;
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
} VmaDefragmentationPassInfo;

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
    VmaDefragmentationStats* VMA_NULLABLE pStats,
    VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context,
    VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context
);

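/*
Example: a sketch of CPU-side defragmentation using the begin/end pair
(`allocations`/`allocCount` are owned by the caller; the incremental
vmaBeginDefragmentationPass/vmaEndDefragmentationPass calls are optional):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = (uint32_t)allocCount;
    defragInfo.pAllocations = allocations;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Buffers/images bound to allocations that were moved must be recreated
    // and rebound afterwards.
*/
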
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    size_t allocationCount,
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
    const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
    VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

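/*
Example: the typical one-call buffer creation path, a sketch based on the
library's quick-start documentation:

    VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufferInfo.size = 65536;
    bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
    // ... later:
    vmaDestroyBuffer(allocator, buffer, allocation);
*/
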
VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
    VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
    VmaAllocation VMA_NULLABLE allocation);

#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <utility>

#if VMA_RECORDING_ENABLED
    #include <chrono>
    #if defined(_WIN32)
        #include <windows.h>
    #else
        #include <sstream>
        #include <thread>
    #endif
#endif

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, referencing the statically linked prototypes, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    #define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
*/
#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
    #if defined(VK_NO_PROTOTYPES)
        extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
        extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    #endif
#endif

4069 
4070 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
4071 //#define VMA_USE_STL_CONTAINERS 1
4072 
4073 /* Set this macro to 1 to make the library including and using STL containers:
4074 std::pair, std::vector, std::list, std::unordered_map.
4075 
4076 Set it to 0 or undefined to make the library using its own implementation of
4077 the containers.
4078 */
4079 #if VMA_USE_STL_CONTAINERS
4080  #define VMA_USE_STL_VECTOR 1
4081  #define VMA_USE_STL_UNORDERED_MAP 1
4082  #define VMA_USE_STL_LIST 1
4083 #endif
4084 
4085 #ifndef VMA_USE_STL_SHARED_MUTEX
4086  // Compiler conforms to C++17.
4087  #if __cplusplus >= 201703L
4088  #define VMA_USE_STL_SHARED_MUTEX 1
4089  // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
4090  // Otherwise it's always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
4091  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
4092  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
4093  #define VMA_USE_STL_SHARED_MUTEX 1
4094  #else
4095  #define VMA_USE_STL_SHARED_MUTEX 0
4096  #endif
4097 #endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
The library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
The following headers are used in this CONFIGURATION section only, so feel free
to remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif

static void* vma_aligned_alloc(size_t alignment, size_t size)
{
#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    // MAC_OS_X_VERSION_10_16), even though the function is marked
    // available for 10.15. That's why the preprocessor checks for 10.16 but
    // the __builtin_available checks for 10.15.
    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
    if (__builtin_available(macOS 10.15, iOS 13, *))
        return aligned_alloc(alignment, size);
#endif
#endif
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#elif defined(_WIN32)
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return _aligned_malloc(size, alignment);
}
#else
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return aligned_alloc(alignment, size);
}
#endif

#if defined(_WIN32)
static void vma_aligned_free(void* ptr)
{
    _aligned_free(ptr);
}
#else
static void vma_aligned_free(void* VMA_NULLABLE ptr)
{
    free(ptr);
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures, e.g. operator[].
// Making it non-empty can make the program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
#endif

#ifndef VMA_SYSTEM_ALIGNED_FREE
    // VMA_SYSTEM_FREE is the old name, but it might have been defined by the user.
    #if defined(VMA_SYSTEM_FREE)
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
    #else
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_MIN_ALIGNMENT
    #ifdef VMA_DEBUG_ALIGNMENT // Old name
        #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
    #else
        #define VMA_MIN_ALIGNMENT (1)
    #endif
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
    /*
    Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
    and return an error instead of leaving it up to the Vulkan implementation what to do in such cases.
    */
    #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}
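
// A worked example of the parallel (SWAR) bit count above, added as a note:
// for v = 0b1011, the first step yields per-2-bit counts 0b0110 (1 and 2),
// the second sums adjacent pairs into 4-bit fields (0b0011 = 3), and the
// remaining folds accumulate the final popcount, 3.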

/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer that is always nonnegative.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Aligns the given value up to the nearest multiple of alignment. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return (val + alignment - 1) & ~(alignment - 1);
}
// Aligns the given value down to the nearest multiple of alignment. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return val & ~(alignment - 1);
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}
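
// A worked example, added as a note: with pageSize = 4096, a resource at
// offset 0 with size 4000 ends at byte 3999 (page 0), while a resource
// starting at offset 4096 begins on page 1, so the function returns false
// and no bufferImageGranularity conflict is possible between them.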

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns an iterator to the first element that is
greater than or equal to (key), according to comparison (cmp).

Cmp should return true if the first argument is less than the second argument.

The returned value is the found element, if present in the collection, or the
place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}
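
// A usage sketch, added as a note: given a sorted array, the search returns
// the insertion point for a key (the first element not less than it).
//
//     VkDeviceSize offsets[] = { 0, 256, 1024 };
//     struct Less { bool operator()(VkDeviceSize a, VkDeviceSize b) const { return a < b; } };
//     VkDeviceSize* it = VmaBinaryFindFirstNotLess(
//         offsets, offsets + 3, (VkDeviceSize)512, Less()); // -> &offsets[2]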

/*
Returns true if all pointers in the array are not-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}
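
// A usage sketch, added as a note: pushing an extension struct onto a pNext
// chain, as done internally for e.g. dedicated allocations (the struct types
// here are illustrative):
//
//     VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
//     VkMemoryDedicatedAllocateInfoKHR dedicatedInfo =
//         { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
//     VmaPnextChainPushFront(&allocInfo, &dedicatedInfo);
//     // allocInfo.pNext now points to dedicatedInfo.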

////////////////////////////////////////////////////////////////////////////////
// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    void* result = VMA_NULL;
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        result = (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
    return result;
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_ALIGNED_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

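// A usage sketch, added as a note: the vma_new/vma_delete pair performs
// placement-new construction and destruction with the given allocation
// callbacks (`pAllocationCallbacks` may be null to fall back to the system
// allocator):
//
//     VmaMutex* pMutex = vma_new(pAllocationCallbacks, VmaMutex)();
//     vma_delete(pAllocationCallbacks, pMutex);
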
static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
    VmaStlAllocator(const VmaStlAllocator&) = default;
};
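
// A usage sketch, added as a note: plugging VmaStlAllocator into an STL
// container so that its storage goes through the Vulkan allocation callbacks
// (assumes <vector> is available, as when VMA_USE_STL_VECTOR is enabled):
//
//     VmaStlAllocator<uint32_t> alloc(pAllocationCallbacks);
//     std::vector<uint32_t, VmaStlAllocator<uint32_t>> indices(alloc);
//     indices.push_back(42u);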

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

/* Class with interface compatible with subset of std::vector.
T must be POD because constructors and destructors are not called and memcpy is
used for these objects. */
5011 template<typename T, typename AllocatorT>
5012 class VmaVector
5013 {
5014 public:
5015  typedef T value_type;
5016 
5017  VmaVector(const AllocatorT& allocator) :
5018  m_Allocator(allocator),
5019  m_pArray(VMA_NULL),
5020  m_Count(0),
5021  m_Capacity(0)
5022  {
5023  }
5024 
5025  VmaVector(size_t count, const AllocatorT& allocator) :
5026  m_Allocator(allocator),
5027  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
5028  m_Count(count),
5029  m_Capacity(count)
5030  {
5031  }
5032 
5033  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
5034  // value is unused.
5035  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
5036  : VmaVector(count, allocator) {}
5037 
5038  VmaVector(const VmaVector<T, AllocatorT>& src) :
5039  m_Allocator(src.m_Allocator),
5040  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
5041  m_Count(src.m_Count),
5042  m_Capacity(src.m_Count)
5043  {
5044  if(m_Count != 0)
5045  {
5046  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
5047  }
5048  }
5049 
5050  ~VmaVector()
5051  {
5052  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5053  }
5054 
5055  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
5056  {
5057  if(&rhs != this)
5058  {
5059  resize(rhs.m_Count);
5060  if(m_Count != 0)
5061  {
5062  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
5063  }
5064  }
5065  return *this;
5066  }
5067 
5068  bool empty() const { return m_Count == 0; }
5069  size_t size() const { return m_Count; }
5070  T* data() { return m_pArray; }
5071  const T* data() const { return m_pArray; }
5072 
5073  T& operator[](size_t index)
5074  {
5075  VMA_HEAVY_ASSERT(index < m_Count);
5076  return m_pArray[index];
5077  }
5078  const T& operator[](size_t index) const
5079  {
5080  VMA_HEAVY_ASSERT(index < m_Count);
5081  return m_pArray[index];
5082  }
5083 
5084  T& front()
5085  {
5086  VMA_HEAVY_ASSERT(m_Count > 0);
5087  return m_pArray[0];
5088  }
5089  const T& front() const
5090  {
5091  VMA_HEAVY_ASSERT(m_Count > 0);
5092  return m_pArray[0];
5093  }
5094  T& back()
5095  {
5096  VMA_HEAVY_ASSERT(m_Count > 0);
5097  return m_pArray[m_Count - 1];
5098  }
5099  const T& back() const
5100  {
5101  VMA_HEAVY_ASSERT(m_Count > 0);
5102  return m_pArray[m_Count - 1];
5103  }
5104 
5105  void reserve(size_t newCapacity, bool freeMemory = false)
5106  {
5107  newCapacity = VMA_MAX(newCapacity, m_Count);
5108 
5109  if((newCapacity < m_Capacity) && !freeMemory)
5110  {
5111  newCapacity = m_Capacity;
5112  }
5113 
5114  if(newCapacity != m_Capacity)
5115  {
5116  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5117  if(m_Count != 0)
5118  {
5119  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5120  }
5121  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5122  m_Capacity = newCapacity;
5123  m_pArray = newArray;
5124  }
5125  }
5126 
5127  void resize(size_t newCount)
5128  {
5129  size_t newCapacity = m_Capacity;
5130  if(newCount > m_Capacity)
5131  {
5132  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
5133  }
5134 
5135  if(newCapacity != m_Capacity)
5136  {
5137  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5138  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
5139  if(elementsToCopy != 0)
5140  {
5141  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
5142  }
5143  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5144  m_Capacity = newCapacity;
5145  m_pArray = newArray;
5146  }
5147 
5148  m_Count = newCount;
5149  }
5150 
5151  void clear()
5152  {
5153  resize(0);
5154  }
5155 
5156  void shrink_to_fit()
5157  {
5158  if(m_Capacity > m_Count)
5159  {
5160  T* newArray = VMA_NULL;
5161  if(m_Count > 0)
5162  {
5163  newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
5164  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5165  }
5166  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5167  m_Capacity = m_Count;
5168  m_pArray = newArray;
5169  }
5170  }
5171 
5172  void insert(size_t index, const T& src)
5173  {
5174  VMA_HEAVY_ASSERT(index <= m_Count);
5175  const size_t oldCount = size();
5176  resize(oldCount + 1);
5177  if(index < oldCount)
5178  {
5179  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
5180  }
5181  m_pArray[index] = src;
5182  }
5183 
5184  void remove(size_t index)
5185  {
5186  VMA_HEAVY_ASSERT(index < m_Count);
5187  const size_t oldCount = size();
5188  if(index < oldCount - 1)
5189  {
5190  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
5191  }
5192  resize(oldCount - 1);
5193  }
5194 
5195  void push_back(const T& src)
5196  {
5197  const size_t newIndex = size();
5198  resize(newIndex + 1);
5199  m_pArray[newIndex] = src;
5200  }
5201 
5202  void pop_back()
5203  {
5204  VMA_HEAVY_ASSERT(m_Count > 0);
5205  resize(size() - 1);
5206  }
5207 
5208  void push_front(const T& src)
5209  {
5210  insert(0, src);
5211  }
5212 
5213  void pop_front()
5214  {
5215  VMA_HEAVY_ASSERT(m_Count > 0);
5216  remove(0);
5217  }
5218 
5219  typedef T* iterator;
5220 
5221  iterator begin() { return m_pArray; }
5222  iterator end() { return m_pArray + m_Count; }
5223 
5224 private:
5225  AllocatorT m_Allocator;
5226  T* m_pArray;
5227  size_t m_Count;
5228  size_t m_Capacity;
5229 };
5230 
5231 template<typename T, typename allocatorT>
5232 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
5233 {
5234  vec.insert(index, item);
5235 }
5236 
5237 template<typename T, typename allocatorT>
5238 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
5239 {
5240  vec.remove(index);
5241 }
5242 
5243 #endif // #if VMA_USE_STL_VECTOR
5244 
5245 template<typename CmpLess, typename VectorT>
5246 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
5247 {
5248  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5249  vector.data(),
5250  vector.data() + vector.size(),
5251  value,
5252  CmpLess()) - vector.data();
5253  VmaVectorInsert(vector, indexToInsert, value);
5254  return indexToInsert;
5255 }
5256 
5257 template<typename CmpLess, typename VectorT>
5258 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
5259 {
5260  CmpLess comparator;
5261  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
5262  vector.begin(),
5263  vector.end(),
5264  value,
5265  comparator);
5266  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5267  {
5268  size_t indexToRemove = it - vector.begin();
5269  VmaVectorRemove(vector, indexToRemove);
5270  return true;
5271  }
5272  return false;
5273 }
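/* Illustrative sketch: keeping a VmaVector sorted with the two helpers above,
using a hypothetical less-than functor and callbacks pointer `allocs`:

    struct UintLess { bool operator()(uint32_t a, uint32_t b) const { return a < b; } };

    VmaVector<uint32_t, VmaStlAllocator<uint32_t> > v(VmaStlAllocator<uint32_t>(allocs));
    VmaVectorInsertSorted<UintLess>(v, 3u); // binary search for position, then insert
    VmaVectorInsertSorted<UintLess>(v, 1u); // v == {1, 3}
    VmaVectorRemoveSorted<UintLess>(v, 3u); // found via binary search, removed -> {1}
*/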
5274 
5275 ////////////////////////////////////////////////////////////////////////////////
5276 // class VmaSmallVector
5277 
5278 /*
5279 This is a vector (a variable-sized array), optimized for the case when the array is small.
5280 
5281 It contains some number of elements in-place, which allows it to avoid heap allocation
5282 when the actual number of elements is below that threshold. This allows normal "small"
5283 cases to be fast without losing generality for large inputs.
5284 */
5285 
5286 template<typename T, typename AllocatorT, size_t N>
5287 class VmaSmallVector
5288 {
5289 public:
5290  typedef T value_type;
5291 
5292  VmaSmallVector(const AllocatorT& allocator) :
5293  m_Count(0),
5294  m_DynamicArray(allocator)
5295  {
5296  }
5297  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5298  m_Count(count),
5299  m_DynamicArray(count > N ? count : 0, allocator)
5300  {
5301  }
5302  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5303  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5304  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5305  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5306 
5307  bool empty() const { return m_Count == 0; }
5308  size_t size() const { return m_Count; }
5309  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5310  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5311 
5312  T& operator[](size_t index)
5313  {
5314  VMA_HEAVY_ASSERT(index < m_Count);
5315  return data()[index];
5316  }
5317  const T& operator[](size_t index) const
5318  {
5319  VMA_HEAVY_ASSERT(index < m_Count);
5320  return data()[index];
5321  }
5322 
5323  T& front()
5324  {
5325  VMA_HEAVY_ASSERT(m_Count > 0);
5326  return data()[0];
5327  }
5328  const T& front() const
5329  {
5330  VMA_HEAVY_ASSERT(m_Count > 0);
5331  return data()[0];
5332  }
5333  T& back()
5334  {
5335  VMA_HEAVY_ASSERT(m_Count > 0);
5336  return data()[m_Count - 1];
5337  }
5338  const T& back() const
5339  {
5340  VMA_HEAVY_ASSERT(m_Count > 0);
5341  return data()[m_Count - 1];
5342  }
5343 
5344  void resize(size_t newCount, bool freeMemory = false)
5345  {
5346  if(newCount > N && m_Count > N)
5347  {
5348  // Any direction, staying in m_DynamicArray
5349  m_DynamicArray.resize(newCount);
5350  if(freeMemory)
5351  {
5352  m_DynamicArray.shrink_to_fit();
5353  }
5354  }
5355  else if(newCount > N && m_Count <= N)
5356  {
5357  // Growing, moving from m_StaticArray to m_DynamicArray
5358  m_DynamicArray.resize(newCount);
5359  if(m_Count > 0)
5360  {
5361  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5362  }
5363  }
5364  else if(newCount <= N && m_Count > N)
5365  {
5366  // Shrinking, moving from m_DynamicArray to m_StaticArray
5367  if(newCount > 0)
5368  {
5369  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5370  }
5371  m_DynamicArray.resize(0);
5372  if(freeMemory)
5373  {
5374  m_DynamicArray.shrink_to_fit();
5375  }
5376  }
5377  else
5378  {
5379  // Any direction, staying in m_StaticArray - nothing to do here
5380  }
5381  m_Count = newCount;
5382  }
5383 
5384  void clear(bool freeMemory = false)
5385  {
5386  m_DynamicArray.clear();
5387  if(freeMemory)
5388  {
5389  m_DynamicArray.shrink_to_fit();
5390  }
5391  m_Count = 0;
5392  }
5393 
5394  void insert(size_t index, const T& src)
5395  {
5396  VMA_HEAVY_ASSERT(index <= m_Count);
5397  const size_t oldCount = size();
5398  resize(oldCount + 1);
5399  T* const dataPtr = data();
5400  if(index < oldCount)
5401  {
5402  // Note: this could be optimized so that the memmove becomes a direct memcpy from m_StaticArray to m_DynamicArray when the resize crosses the threshold.
5403  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5404  }
5405  dataPtr[index] = src;
5406  }
5407 
5408  void remove(size_t index)
5409  {
5410  VMA_HEAVY_ASSERT(index < m_Count);
5411  const size_t oldCount = size();
5412  if(index < oldCount - 1)
5413  {
5414  // Note: this could be optimized so that the memmove becomes a direct memcpy from m_DynamicArray to m_StaticArray when the resize crosses the threshold.
5415  T* const dataPtr = data();
5416  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5417  }
5418  resize(oldCount - 1);
5419  }
5420 
5421  void push_back(const T& src)
5422  {
5423  const size_t newIndex = size();
5424  resize(newIndex + 1);
5425  data()[newIndex] = src;
5426  }
5427 
5428  void pop_back()
5429  {
5430  VMA_HEAVY_ASSERT(m_Count > 0);
5431  resize(size() - 1);
5432  }
5433 
5434  void push_front(const T& src)
5435  {
5436  insert(0, src);
5437  }
5438 
5439  void pop_front()
5440  {
5441  VMA_HEAVY_ASSERT(m_Count > 0);
5442  remove(0);
5443  }
5444 
5445  typedef T* iterator;
5446 
5447  iterator begin() { return data(); }
5448  iterator end() { return data() + m_Count; }
5449 
5450 private:
5451  size_t m_Count;
5452  T m_StaticArray[N]; // Used when m_Count <= N
5453  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
5454 };
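/* Illustrative sketch: with N == 4, the first four elements live in
m_StaticArray and cause no heap traffic; the fifth push_back crosses the
threshold and the contents migrate into m_DynamicArray (`allocs` is a
hypothetical callbacks pointer):

    VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 4> sv(VmaStlAllocator<uint32_t>(allocs));
    for(uint32_t i = 0; i < 4; ++i)
        sv.push_back(i);  // stays entirely in m_StaticArray
    sv.push_back(4u);     // resize(5) copies the 4 elements into the heap-backed vector
*/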
5455 
5456 ////////////////////////////////////////////////////////////////////////////////
5457 // class VmaPoolAllocator
5458 
5459 /*
5460 Allocator for objects of type T using a list of arrays (pools) to speed up
5461 allocation. Number of elements that can be allocated is not bounded because
5462 allocator can create multiple blocks.
5463 */
5464 template<typename T>
5465 class VmaPoolAllocator
5466 {
5467  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5468 public:
5469  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5470  ~VmaPoolAllocator();
5471  template<typename... Types> T* Alloc(Types... args);
5472  void Free(T* ptr);
5473 
5474 private:
5475  union Item
5476  {
5477  uint32_t NextFreeIndex;
5478  alignas(T) char Value[sizeof(T)];
5479  };
5480 
5481  struct ItemBlock
5482  {
5483  Item* pItems;
5484  uint32_t Capacity;
5485  uint32_t FirstFreeIndex;
5486  };
5487 
5488  const VkAllocationCallbacks* m_pAllocationCallbacks;
5489  const uint32_t m_FirstBlockCapacity;
5490  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5491 
5492  ItemBlock& CreateNewBlock();
5493 };
5494 
5495 template<typename T>
5496 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5497  m_pAllocationCallbacks(pAllocationCallbacks),
5498  m_FirstBlockCapacity(firstBlockCapacity),
5499  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5500 {
5501  VMA_ASSERT(m_FirstBlockCapacity > 1);
5502 }
5503 
5504 template<typename T>
5505 VmaPoolAllocator<T>::~VmaPoolAllocator()
5506 {
5507  for(size_t i = m_ItemBlocks.size(); i--; )
5508  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5509  m_ItemBlocks.clear();
5510 }
5511 
5512 template<typename T>
5513 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
5514 {
5515  for(size_t i = m_ItemBlocks.size(); i--; )
5516  {
5517  ItemBlock& block = m_ItemBlocks[i];
5518  // This block has some free items: Use the first one.
5519  if(block.FirstFreeIndex != UINT32_MAX)
5520  {
5521  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5522  block.FirstFreeIndex = pItem->NextFreeIndex;
5523  T* result = (T*)&pItem->Value;
5524  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5525  return result;
5526  }
5527  }
5528 
5529  // No block has a free item: Create a new one and use it.
5530  ItemBlock& newBlock = CreateNewBlock();
5531  Item* const pItem = &newBlock.pItems[0];
5532  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5533  T* result = (T*)&pItem->Value;
5534  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5535  return result;
5536 }
5537 
5538 template<typename T>
5539 void VmaPoolAllocator<T>::Free(T* ptr)
5540 {
5541  // Search all memory blocks to find ptr.
5542  for(size_t i = m_ItemBlocks.size(); i--; )
5543  {
5544  ItemBlock& block = m_ItemBlocks[i];
5545 
5546  // Reinterpret ptr as Item*: the pointer value is transferred with memcpy to avoid a questionable direct cast to the union type.
5547  Item* pItemPtr;
5548  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5549 
5550  // Check if pItemPtr is in address range of this block.
5551  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5552  {
5553  ptr->~T(); // Explicit destructor call.
5554  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5555  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5556  block.FirstFreeIndex = index;
5557  return;
5558  }
5559  }
5560  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5561 }
5562 
5563 template<typename T>
5564 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5565 {
5566  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5567  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5568 
5569  const ItemBlock newBlock = {
5570  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5571  newBlockCapacity,
5572  0 };
5573 
5574  m_ItemBlocks.push_back(newBlock);
5575 
5576  // Setup singly-linked list of all free items in this block.
5577  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5578  newBlock.pItems[i].NextFreeIndex = i + 1;
5579  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5580  return m_ItemBlocks.back();
5581 }
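/* Illustrative usage sketch: blocks grow geometrically (Capacity * 3 / 2), and
freed slots are recycled through each block's singly-linked free list. MyItem
is a hypothetical default-constructible type and `allocs` a callbacks pointer:

    VmaPoolAllocator<MyItem> pool(allocs, 32); // first block holds 32 items
    MyItem* a = pool.Alloc();                  // pops the free list, placement-new
    MyItem* b = pool.Alloc();
    pool.Free(a);                              // ~MyItem(), slot pushed back onto the free list
    pool.Free(b);
*/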
5582 
5583 ////////////////////////////////////////////////////////////////////////////////
5584 // class VmaRawList, VmaList
5585 
5586 #if VMA_USE_STL_LIST
5587 
5588 #define VmaList std::list
5589 
5590 #else // #if VMA_USE_STL_LIST
5591 
5592 template<typename T>
5593 struct VmaListItem
5594 {
5595  VmaListItem* pPrev;
5596  VmaListItem* pNext;
5597  T Value;
5598 };
5599 
5600 // Doubly linked list.
5601 template<typename T>
5602 class VmaRawList
5603 {
5604  VMA_CLASS_NO_COPY(VmaRawList)
5605 public:
5606  typedef VmaListItem<T> ItemType;
5607 
5608  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5609  ~VmaRawList();
5610  void Clear();
5611 
5612  size_t GetCount() const { return m_Count; }
5613  bool IsEmpty() const { return m_Count == 0; }
5614 
5615  ItemType* Front() { return m_pFront; }
5616  const ItemType* Front() const { return m_pFront; }
5617  ItemType* Back() { return m_pBack; }
5618  const ItemType* Back() const { return m_pBack; }
5619 
5620  ItemType* PushBack();
5621  ItemType* PushFront();
5622  ItemType* PushBack(const T& value);
5623  ItemType* PushFront(const T& value);
5624  void PopBack();
5625  void PopFront();
5626 
5627  // Item can be null - it means PushBack.
5628  ItemType* InsertBefore(ItemType* pItem);
5629  // Item can be null - it means PushFront.
5630  ItemType* InsertAfter(ItemType* pItem);
5631 
5632  ItemType* InsertBefore(ItemType* pItem, const T& value);
5633  ItemType* InsertAfter(ItemType* pItem, const T& value);
5634 
5635  void Remove(ItemType* pItem);
5636 
5637 private:
5638  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5639  VmaPoolAllocator<ItemType> m_ItemAllocator;
5640  ItemType* m_pFront;
5641  ItemType* m_pBack;
5642  size_t m_Count;
5643 };
5644 
5645 template<typename T>
5646 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5647  m_pAllocationCallbacks(pAllocationCallbacks),
5648  m_ItemAllocator(pAllocationCallbacks, 128),
5649  m_pFront(VMA_NULL),
5650  m_pBack(VMA_NULL),
5651  m_Count(0)
5652 {
5653 }
5654 
5655 template<typename T>
5656 VmaRawList<T>::~VmaRawList()
5657 {
5658  // Intentionally not calling Clear: that would waste work returning all
5659  // items to m_ItemAllocator as free, when the allocator itself is about to be destroyed.
5660 }
5661 
5662 template<typename T>
5663 void VmaRawList<T>::Clear()
5664 {
5665  if(IsEmpty() == false)
5666  {
5667  ItemType* pItem = m_pBack;
5668  while(pItem != VMA_NULL)
5669  {
5670  ItemType* const pPrevItem = pItem->pPrev;
5671  m_ItemAllocator.Free(pItem);
5672  pItem = pPrevItem;
5673  }
5674  m_pFront = VMA_NULL;
5675  m_pBack = VMA_NULL;
5676  m_Count = 0;
5677  }
5678 }
5679 
5680 template<typename T>
5681 VmaListItem<T>* VmaRawList<T>::PushBack()
5682 {
5683  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5684  pNewItem->pNext = VMA_NULL;
5685  if(IsEmpty())
5686  {
5687  pNewItem->pPrev = VMA_NULL;
5688  m_pFront = pNewItem;
5689  m_pBack = pNewItem;
5690  m_Count = 1;
5691  }
5692  else
5693  {
5694  pNewItem->pPrev = m_pBack;
5695  m_pBack->pNext = pNewItem;
5696  m_pBack = pNewItem;
5697  ++m_Count;
5698  }
5699  return pNewItem;
5700 }
5701 
5702 template<typename T>
5703 VmaListItem<T>* VmaRawList<T>::PushFront()
5704 {
5705  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5706  pNewItem->pPrev = VMA_NULL;
5707  if(IsEmpty())
5708  {
5709  pNewItem->pNext = VMA_NULL;
5710  m_pFront = pNewItem;
5711  m_pBack = pNewItem;
5712  m_Count = 1;
5713  }
5714  else
5715  {
5716  pNewItem->pNext = m_pFront;
5717  m_pFront->pPrev = pNewItem;
5718  m_pFront = pNewItem;
5719  ++m_Count;
5720  }
5721  return pNewItem;
5722 }
5723 
5724 template<typename T>
5725 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5726 {
5727  ItemType* const pNewItem = PushBack();
5728  pNewItem->Value = value;
5729  return pNewItem;
5730 }
5731 
5732 template<typename T>
5733 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5734 {
5735  ItemType* const pNewItem = PushFront();
5736  pNewItem->Value = value;
5737  return pNewItem;
5738 }
5739 
5740 template<typename T>
5741 void VmaRawList<T>::PopBack()
5742 {
5743  VMA_HEAVY_ASSERT(m_Count > 0);
5744  ItemType* const pBackItem = m_pBack;
5745  ItemType* const pPrevItem = pBackItem->pPrev;
5746  if(pPrevItem != VMA_NULL)
5747  {
5748  pPrevItem->pNext = VMA_NULL;
5749  }
5750  m_pBack = pPrevItem;
5751  m_ItemAllocator.Free(pBackItem);
5752  --m_Count;
5753 }
5754 
5755 template<typename T>
5756 void VmaRawList<T>::PopFront()
5757 {
5758  VMA_HEAVY_ASSERT(m_Count > 0);
5759  ItemType* const pFrontItem = m_pFront;
5760  ItemType* const pNextItem = pFrontItem->pNext;
5761  if(pNextItem != VMA_NULL)
5762  {
5763  pNextItem->pPrev = VMA_NULL;
5764  }
5765  m_pFront = pNextItem;
5766  m_ItemAllocator.Free(pFrontItem);
5767  --m_Count;
5768 }
5769 
5770 template<typename T>
5771 void VmaRawList<T>::Remove(ItemType* pItem)
5772 {
5773  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5774  VMA_HEAVY_ASSERT(m_Count > 0);
5775 
5776  if(pItem->pPrev != VMA_NULL)
5777  {
5778  pItem->pPrev->pNext = pItem->pNext;
5779  }
5780  else
5781  {
5782  VMA_HEAVY_ASSERT(m_pFront == pItem);
5783  m_pFront = pItem->pNext;
5784  }
5785 
5786  if(pItem->pNext != VMA_NULL)
5787  {
5788  pItem->pNext->pPrev = pItem->pPrev;
5789  }
5790  else
5791  {
5792  VMA_HEAVY_ASSERT(m_pBack == pItem);
5793  m_pBack = pItem->pPrev;
5794  }
5795 
5796  m_ItemAllocator.Free(pItem);
5797  --m_Count;
5798 }
5799 
5800 template<typename T>
5801 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5802 {
5803  if(pItem != VMA_NULL)
5804  {
5805  ItemType* const prevItem = pItem->pPrev;
5806  ItemType* const newItem = m_ItemAllocator.Alloc();
5807  newItem->pPrev = prevItem;
5808  newItem->pNext = pItem;
5809  pItem->pPrev = newItem;
5810  if(prevItem != VMA_NULL)
5811  {
5812  prevItem->pNext = newItem;
5813  }
5814  else
5815  {
5816  VMA_HEAVY_ASSERT(m_pFront == pItem);
5817  m_pFront = newItem;
5818  }
5819  ++m_Count;
5820  return newItem;
5821  }
5822  else
5823  return PushBack();
5824 }
5825 
5826 template<typename T>
5827 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5828 {
5829  if(pItem != VMA_NULL)
5830  {
5831  ItemType* const nextItem = pItem->pNext;
5832  ItemType* const newItem = m_ItemAllocator.Alloc();
5833  newItem->pNext = nextItem;
5834  newItem->pPrev = pItem;
5835  pItem->pNext = newItem;
5836  if(nextItem != VMA_NULL)
5837  {
5838  nextItem->pPrev = newItem;
5839  }
5840  else
5841  {
5842  VMA_HEAVY_ASSERT(m_pBack == pItem);
5843  m_pBack = newItem;
5844  }
5845  ++m_Count;
5846  return newItem;
5847  }
5848  else
5849  return PushFront();
5850 }
5851 
5852 template<typename T>
5853 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5854 {
5855  ItemType* const newItem = InsertBefore(pItem);
5856  newItem->Value = value;
5857  return newItem;
5858 }
5859 
5860 template<typename T>
5861 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5862 {
5863  ItemType* const newItem = InsertAfter(pItem);
5864  newItem->Value = value;
5865  return newItem;
5866 }
5867 
5868 template<typename T, typename AllocatorT>
5869 class VmaList
5870 {
5871  VMA_CLASS_NO_COPY(VmaList)
5872 public:
5873  class iterator
5874  {
5875  public:
5876  iterator() :
5877  m_pList(VMA_NULL),
5878  m_pItem(VMA_NULL)
5879  {
5880  }
5881 
5882  T& operator*() const
5883  {
5884  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5885  return m_pItem->Value;
5886  }
5887  T* operator->() const
5888  {
5889  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5890  return &m_pItem->Value;
5891  }
5892 
5893  iterator& operator++()
5894  {
5895  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5896  m_pItem = m_pItem->pNext;
5897  return *this;
5898  }
5899  iterator& operator--()
5900  {
5901  if(m_pItem != VMA_NULL)
5902  {
5903  m_pItem = m_pItem->pPrev;
5904  }
5905  else
5906  {
5907  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5908  m_pItem = m_pList->Back();
5909  }
5910  return *this;
5911  }
5912 
5913  iterator operator++(int)
5914  {
5915  iterator result = *this;
5916  ++*this;
5917  return result;
5918  }
5919  iterator operator--(int)
5920  {
5921  iterator result = *this;
5922  --*this;
5923  return result;
5924  }
5925 
5926  bool operator==(const iterator& rhs) const
5927  {
5928  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5929  return m_pItem == rhs.m_pItem;
5930  }
5931  bool operator!=(const iterator& rhs) const
5932  {
5933  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5934  return m_pItem != rhs.m_pItem;
5935  }
5936 
5937  private:
5938  VmaRawList<T>* m_pList;
5939  VmaListItem<T>* m_pItem;
5940 
5941  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5942  m_pList(pList),
5943  m_pItem(pItem)
5944  {
5945  }
5946 
5947  friend class VmaList<T, AllocatorT>;
5948  };
5949 
5950  class const_iterator
5951  {
5952  public:
5953  const_iterator() :
5954  m_pList(VMA_NULL),
5955  m_pItem(VMA_NULL)
5956  {
5957  }
5958 
5959  const_iterator(const iterator& src) :
5960  m_pList(src.m_pList),
5961  m_pItem(src.m_pItem)
5962  {
5963  }
5964 
5965  const T& operator*() const
5966  {
5967  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5968  return m_pItem->Value;
5969  }
5970  const T* operator->() const
5971  {
5972  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5973  return &m_pItem->Value;
5974  }
5975 
5976  const_iterator& operator++()
5977  {
5978  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5979  m_pItem = m_pItem->pNext;
5980  return *this;
5981  }
5982  const_iterator& operator--()
5983  {
5984  if(m_pItem != VMA_NULL)
5985  {
5986  m_pItem = m_pItem->pPrev;
5987  }
5988  else
5989  {
5990  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5991  m_pItem = m_pList->Back();
5992  }
5993  return *this;
5994  }
5995 
5996  const_iterator operator++(int)
5997  {
5998  const_iterator result = *this;
5999  ++*this;
6000  return result;
6001  }
6002  const_iterator operator--(int)
6003  {
6004  const_iterator result = *this;
6005  --*this;
6006  return result;
6007  }
6008 
6009  bool operator==(const const_iterator& rhs) const
6010  {
6011  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
6012  return m_pItem == rhs.m_pItem;
6013  }
6014  bool operator!=(const const_iterator& rhs) const
6015  {
6016  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
6017  return m_pItem != rhs.m_pItem;
6018  }
6019 
6020  private:
6021  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
6022  m_pList(pList),
6023  m_pItem(pItem)
6024  {
6025  }
6026 
6027  const VmaRawList<T>* m_pList;
6028  const VmaListItem<T>* m_pItem;
6029 
6030  friend class VmaList<T, AllocatorT>;
6031  };
6032 
6033  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
6034 
6035  bool empty() const { return m_RawList.IsEmpty(); }
6036  size_t size() const { return m_RawList.GetCount(); }
6037 
6038  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
6039  iterator end() { return iterator(&m_RawList, VMA_NULL); }
6040 
6041  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
6042  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
6043 
6044  void clear() { m_RawList.Clear(); }
6045  void push_back(const T& value) { m_RawList.PushBack(value); }
6046  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
6047  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
6048 
6049 private:
6050  VmaRawList<T> m_RawList;
6051 };
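/* Illustrative sketch: VmaList mirrors the subset of std::list that the library
needs; iteration and insertion behave as expected (`allocs` is hypothetical):

    VmaList<uint32_t, VmaStlAllocator<uint32_t> > list(VmaStlAllocator<uint32_t>(allocs));
    list.push_back(1u);
    list.insert(list.begin(), 0u); // InsertBefore the front -> list is {0, 1}
    uint32_t sum = 0;
    for(VmaList<uint32_t, VmaStlAllocator<uint32_t> >::iterator it = list.begin(); it != list.end(); ++it)
        sum += *it; // sum == 1
*/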
6052 
6053 #endif // #if VMA_USE_STL_LIST
6054 
6055 ////////////////////////////////////////////////////////////////////////////////
6056 // class VmaIntrusiveLinkedList
6057 
6058 /*
6059 Expected interface of ItemTypeTraits:
6060 struct MyItemTypeTraits
6061 {
6062  typedef MyItem ItemType;
6063  static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
6064  static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
6065  static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
6066  static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
6067 };
6068 */
6069 template<typename ItemTypeTraits>
6070 class VmaIntrusiveLinkedList
6071 {
6072 public:
6073  typedef typename ItemTypeTraits::ItemType ItemType;
6074  static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
6075  static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
6076  // Movable, not copyable.
6077  VmaIntrusiveLinkedList() { }
6078  VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
6079  VmaIntrusiveLinkedList(VmaIntrusiveLinkedList<ItemTypeTraits>&& src) :
6080  m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
6081  {
6082  src.m_Front = src.m_Back = VMA_NULL;
6083  src.m_Count = 0;
6084  }
6085  ~VmaIntrusiveLinkedList()
6086  {
6087  VMA_HEAVY_ASSERT(IsEmpty());
6088  }
6089  VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(const VmaIntrusiveLinkedList<ItemTypeTraits>& src) = delete;
6090  VmaIntrusiveLinkedList<ItemTypeTraits>& operator=(VmaIntrusiveLinkedList<ItemTypeTraits>&& src)
6091  {
6092  if(&src != this)
6093  {
6094  VMA_HEAVY_ASSERT(IsEmpty());
6095  m_Front = src.m_Front;
6096  m_Back = src.m_Back;
6097  m_Count = src.m_Count;
6098  src.m_Front = src.m_Back = VMA_NULL;
6099  src.m_Count = 0;
6100  }
6101  return *this;
6102  }
6103  void RemoveAll()
6104  {
6105  if(!IsEmpty())
6106  {
6107  ItemType* item = m_Back;
6108  while(item != VMA_NULL)
6109  {
6110  ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
6111  ItemTypeTraits::AccessPrev(item) = VMA_NULL;
6112  ItemTypeTraits::AccessNext(item) = VMA_NULL;
6113  item = prevItem;
6114  }
6115  m_Front = VMA_NULL;
6116  m_Back = VMA_NULL;
6117  m_Count = 0;
6118  }
6119  }
6120  size_t GetCount() const { return m_Count; }
6121  bool IsEmpty() const { return m_Count == 0; }
6122  ItemType* Front() { return m_Front; }
6123  const ItemType* Front() const { return m_Front; }
6124  ItemType* Back() { return m_Back; }
6125  const ItemType* Back() const { return m_Back; }
6126  void PushBack(ItemType* item)
6127  {
6128  VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
6129  if(IsEmpty())
6130  {
6131  m_Front = item;
6132  m_Back = item;
6133  m_Count = 1;
6134  }
6135  else
6136  {
6137  ItemTypeTraits::AccessPrev(item) = m_Back;
6138  ItemTypeTraits::AccessNext(m_Back) = item;
6139  m_Back = item;
6140  ++m_Count;
6141  }
6142  }
6143  void PushFront(ItemType* item)
6144  {
6145  VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
6146  if(IsEmpty())
6147  {
6148  m_Front = item;
6149  m_Back = item;
6150  m_Count = 1;
6151  }
6152  else
6153  {
6154  ItemTypeTraits::AccessNext(item) = m_Front;
6155  ItemTypeTraits::AccessPrev(m_Front) = item;
6156  m_Front = item;
6157  ++m_Count;
6158  }
6159  }
6160  ItemType* PopBack()
6161  {
6162  VMA_HEAVY_ASSERT(m_Count > 0);
6163  ItemType* const backItem = m_Back;
6164  ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
6165  if(prevItem != VMA_NULL)
6166  {
6167  ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
6168  }
6169  m_Back = prevItem;
6170  --m_Count;
6171  ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
6172  ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
6173  return backItem;
6174  }
6175  ItemType* PopFront()
6176  {
6177  VMA_HEAVY_ASSERT(m_Count > 0);
6178  ItemType* const frontItem = m_Front;
6179  ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
6180  if(nextItem != VMA_NULL)
6181  {
6182  ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
6183  }
6184  m_Front = nextItem;
6185  --m_Count;
6186  ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
6187  ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
6188  return frontItem;
6189  }
6190 
6191  // existingItem can be null - it means PushBack.
6192  void InsertBefore(ItemType* existingItem, ItemType* newItem)
6193  {
6194  VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
6195  if(existingItem != VMA_NULL)
6196  {
6197  ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
6198  ItemTypeTraits::AccessPrev(newItem) = prevItem;
6199  ItemTypeTraits::AccessNext(newItem) = existingItem;
6200  ItemTypeTraits::AccessPrev(existingItem) = newItem;
6201  if(prevItem != VMA_NULL)
6202  {
6203  ItemTypeTraits::AccessNext(prevItem) = newItem;
6204  }
6205  else
6206  {
6207  VMA_HEAVY_ASSERT(m_Front == existingItem);
6208  m_Front = newItem;
6209  }
6210  ++m_Count;
6211  }
6212  else
6213  PushBack(newItem);
6214  }
6215  // existingItem can be null - it means PushFront.
6216  void InsertAfter(ItemType* existingItem, ItemType* newItem)
6217  {
6218  VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
6219  if(existingItem != VMA_NULL)
6220  {
6221  ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
6222  ItemTypeTraits::AccessNext(newItem) = nextItem;
6223  ItemTypeTraits::AccessPrev(newItem) = existingItem;
6224  ItemTypeTraits::AccessNext(existingItem) = newItem;
6225  if(nextItem != VMA_NULL)
6226  {
6227  ItemTypeTraits::AccessPrev(nextItem) = newItem;
6228  }
6229  else
6230  {
6231  VMA_HEAVY_ASSERT(m_Back == existingItem);
6232  m_Back = newItem;
6233  }
6234  ++m_Count;
6235  }
6236  else
6237  PushFront(newItem);
6238  }
6239  void Remove(ItemType* item)
6240  {
6241  VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
6242  if(ItemTypeTraits::GetPrev(item) != VMA_NULL)
6243  {
6244  ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
6245  }
6246  else
6247  {
6248  VMA_HEAVY_ASSERT(m_Front == item);
6249  m_Front = ItemTypeTraits::GetNext(item);
6250  }
6251 
6252  if(ItemTypeTraits::GetNext(item) != VMA_NULL)
6253  {
6254  ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
6255  }
6256  else
6257  {
6258  VMA_HEAVY_ASSERT(m_Back == item);
6259  m_Back = ItemTypeTraits::GetPrev(item);
6260  }
6261  ItemTypeTraits::AccessPrev(item) = VMA_NULL;
6262  ItemTypeTraits::AccessNext(item) = VMA_NULL;
6263  --m_Count;
6264  }
6265 private:
6266  ItemType* m_Front = VMA_NULL;
6267  ItemType* m_Back = VMA_NULL;
6268  size_t m_Count = 0;
6269 };
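/* Illustrative usage sketch, with MyItem/MyItemTypeTraits declared as in the
expected-interface comment above; the prev/next pointers must start out null
for the PushBack/InsertAfter asserts to hold:

    struct MyItem { MyItem* myPrevPtr = VMA_NULL; MyItem* myNextPtr = VMA_NULL; };

    MyItem a, b;
    VmaIntrusiveLinkedList<MyItemTypeTraits> list;
    list.PushBack(&a);
    list.InsertAfter(&a, &b); // links b behind a; no heap allocation happens
    list.RemoveAll();          // must leave the list empty before it is destroyed
*/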
6270 
6271 ////////////////////////////////////////////////////////////////////////////////
6272 // class VmaMap
6273 
6274 // Unused in this version.
6275 #if 0
6276 
6277 #if VMA_USE_STL_UNORDERED_MAP
6278 
6279 #define VmaPair std::pair
6280 
6281 #define VMA_MAP_TYPE(KeyT, ValueT) \
6282  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
6283 
6284 #else // #if VMA_USE_STL_UNORDERED_MAP
6285 
6286 template<typename T1, typename T2>
6287 struct VmaPair
6288 {
6289  T1 first;
6290  T2 second;
6291 
6292  VmaPair() : first(), second() { }
6293  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
6294 };
6295 
6296 /* Class compatible with subset of interface of std::unordered_map.
6297 KeyT, ValueT must be POD because they will be stored in VmaVector.
6298 */
6299 template<typename KeyT, typename ValueT>
6300 class VmaMap
6301 {
6302 public:
6303  typedef VmaPair<KeyT, ValueT> PairType;
6304  typedef PairType* iterator;
6305 
6306  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
6307 
6308  iterator begin() { return m_Vector.begin(); }
6309  iterator end() { return m_Vector.end(); }
6310 
6311  void insert(const PairType& pair);
6312  iterator find(const KeyT& key);
6313  void erase(iterator it);
6314 
6315 private:
6316  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
6317 };
6318 
6319 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
6320 
6321 template<typename FirstT, typename SecondT>
6322 struct VmaPairFirstLess
6323 {
6324  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
6325  {
6326  return lhs.first < rhs.first;
6327  }
6328  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
6329  {
6330  return lhs.first < rhsFirst;
6331  }
6332 };
6333 
6334 template<typename KeyT, typename ValueT>
6335 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
6336 {
6337  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
6338  m_Vector.data(),
6339  m_Vector.data() + m_Vector.size(),
6340  pair,
6341  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
6342  VmaVectorInsert(m_Vector, indexToInsert, pair);
6343 }
6344 
6345 template<typename KeyT, typename ValueT>
6346 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
6347 {
6348  PairType* it = VmaBinaryFindFirstNotLess(
6349  m_Vector.data(),
6350  m_Vector.data() + m_Vector.size(),
6351  key,
6352  VmaPairFirstLess<KeyT, ValueT>());
6353  if((it != m_Vector.end()) && (it->first == key))
6354  {
6355  return it;
6356  }
6357  else
6358  {
6359  return m_Vector.end();
6360  }
6361 }
6362 
6363 template<typename KeyT, typename ValueT>
6364 void VmaMap<KeyT, ValueT>::erase(iterator it)
6365 {
6366  VmaVectorRemove(m_Vector, it - m_Vector.begin());
6367 }
6368 
6369 #endif // #if VMA_USE_STL_UNORDERED_MAP
6370 
6371 #endif // #if 0
6372 
6373 ////////////////////////////////////////////////////////////////////////////////
6374 
6375 class VmaDeviceMemoryBlock;
6376 
6377 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
6378 
6379 struct VmaAllocation_T
6380 {
6381 private:
6382  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
6383 
6384  enum FLAGS
6385  {
6386  FLAG_USER_DATA_STRING = 0x01,
6387  };
6388 
6389 public:
6390  enum ALLOCATION_TYPE
6391  {
6392  ALLOCATION_TYPE_NONE,
6393  ALLOCATION_TYPE_BLOCK,
6394  ALLOCATION_TYPE_DEDICATED,
6395  };
6396 
6397  /*
6398  This struct is allocated using VmaPoolAllocator.
6399  */
6400 
6401  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
6402  m_Alignment{1},
6403  m_Size{0},
6404  m_pUserData{VMA_NULL},
6405  m_LastUseFrameIndex{currentFrameIndex},
6406  m_MemoryTypeIndex{0},
6407  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
6408  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
6409  m_MapCount{0},
6410  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
6411  {
6412 #if VMA_STATS_STRING_ENABLED
6413  m_CreationFrameIndex = currentFrameIndex;
6414  m_BufferImageUsage = 0;
6415 #endif
6416  }
6417 
6418  ~VmaAllocation_T()
6419  {
6420  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
6421 
6422  // Check if owned string was freed.
6423  VMA_ASSERT(m_pUserData == VMA_NULL);
6424  }
6425 
6426  void InitBlockAllocation(
6427  VmaDeviceMemoryBlock* block,
6428  VkDeviceSize offset,
6429  VkDeviceSize alignment,
6430  VkDeviceSize size,
6431  uint32_t memoryTypeIndex,
6432  VmaSuballocationType suballocationType,
6433  bool mapped,
6434  bool canBecomeLost)
6435  {
6436  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6437  VMA_ASSERT(block != VMA_NULL);
6438  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6439  m_Alignment = alignment;
6440  m_Size = size;
6441  m_MemoryTypeIndex = memoryTypeIndex;
6442  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6443  m_SuballocationType = (uint8_t)suballocationType;
6444  m_BlockAllocation.m_Block = block;
6445  m_BlockAllocation.m_Offset = offset;
6446  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
6447  }
6448 
6449  void InitLost()
6450  {
6451  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6452  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
6453  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6454  m_MemoryTypeIndex = 0;
6455  m_BlockAllocation.m_Block = VMA_NULL;
6456  m_BlockAllocation.m_Offset = 0;
6457  m_BlockAllocation.m_CanBecomeLost = true;
6458  }
6459 
6460  void ChangeBlockAllocation(
6461  VmaAllocator hAllocator,
6462  VmaDeviceMemoryBlock* block,
6463  VkDeviceSize offset);
6464 
6465  void ChangeOffset(VkDeviceSize newOffset);
6466 
6467  // A non-null pMappedData means the allocation was created with the MAPPED flag.
6468  void InitDedicatedAllocation(
6469  uint32_t memoryTypeIndex,
6470  VkDeviceMemory hMemory,
6471  VmaSuballocationType suballocationType,
6472  void* pMappedData,
6473  VkDeviceSize size)
6474  {
6475  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6476  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
6477  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
6478  m_Alignment = 0;
6479  m_Size = size;
6480  m_MemoryTypeIndex = memoryTypeIndex;
6481  m_SuballocationType = (uint8_t)suballocationType;
6482  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6483  m_DedicatedAllocation.m_hMemory = hMemory;
6484  m_DedicatedAllocation.m_pMappedData = pMappedData;
6485  m_DedicatedAllocation.m_Prev = VMA_NULL;
6486  m_DedicatedAllocation.m_Next = VMA_NULL;
6487  }
6488 
6489  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
6490  VkDeviceSize GetAlignment() const { return m_Alignment; }
6491  VkDeviceSize GetSize() const { return m_Size; }
6492  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
6493  void* GetUserData() const { return m_pUserData; }
6494  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6495  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6496 
6497  VmaDeviceMemoryBlock* GetBlock() const
6498  {
6499  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6500  return m_BlockAllocation.m_Block;
6501  }
6502  VkDeviceSize GetOffset() const;
6503  VkDeviceMemory GetMemory() const;
6504  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6505  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6506  void* GetMappedData() const;
6507  bool CanBecomeLost() const;
6508 
6509  uint32_t GetLastUseFrameIndex() const
6510  {
6511  return m_LastUseFrameIndex.load();
6512  }
6513  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6514  {
6515  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6516  }
6517  /*
6518  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6519  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6520  - Else, returns false.
6521 
6522  If hAllocation is already lost, assert - you should not call it then.
6523  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
6524  */
6525  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
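 /* Worked example of the rule above (numbers are hypothetical): with
 LastUseFrameIndex == 10, frameInUseCount == 2 and currentFrameIndex == 13,
 10 + 2 < 13 holds, so MakeLost() sets LastUseFrameIndex = VMA_FRAME_INDEX_LOST
 and returns true; with currentFrameIndex == 12 it returns false. */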
6526 
6527  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6528  {
6529  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6530  outInfo.blockCount = 1;
6531  outInfo.allocationCount = 1;
6532  outInfo.unusedRangeCount = 0;
6533  outInfo.usedBytes = m_Size;
6534  outInfo.unusedBytes = 0;
6535  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6536  outInfo.unusedRangeSizeMin = UINT64_MAX;
6537  outInfo.unusedRangeSizeMax = 0;
6538  }
6539 
6540  void BlockAllocMap();
6541  void BlockAllocUnmap();
6542  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6543  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6544 
6545 #if VMA_STATS_STRING_ENABLED
6546  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6547  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6548 
6549  void InitBufferImageUsage(uint32_t bufferImageUsage)
6550  {
6551  VMA_ASSERT(m_BufferImageUsage == 0);
6552  m_BufferImageUsage = bufferImageUsage;
6553  }
6554 
6555  void PrintParameters(class VmaJsonWriter& json) const;
6556 #endif
6557 
6558 private:
6559  VkDeviceSize m_Alignment;
6560  VkDeviceSize m_Size;
6561  void* m_pUserData;
6562  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6563  uint32_t m_MemoryTypeIndex;
6564  uint8_t m_Type; // ALLOCATION_TYPE
6565  uint8_t m_SuballocationType; // VmaSuballocationType
6566  // Bit 0x80 is set when the allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6567  // Bits with mask 0x7F are the reference counter for vmaMapMemory()/vmaUnmapMemory().
6568  uint8_t m_MapCount;
6569  uint8_t m_Flags; // enum FLAGS
6570 
6571  // Allocation out of VmaDeviceMemoryBlock.
6572  struct BlockAllocation
6573  {
6574  VmaDeviceMemoryBlock* m_Block;
6575  VkDeviceSize m_Offset;
6576  bool m_CanBecomeLost;
6577  };
6578 
6579  // Allocation for an object that has its own private VkDeviceMemory.
6580  struct DedicatedAllocation
6581  {
6582  VkDeviceMemory m_hMemory;
6583  void* m_pMappedData; // Not null means memory is mapped.
6584  VmaAllocation_T* m_Prev;
6585  VmaAllocation_T* m_Next;
6586  };
6587 
6588  union
6589  {
6590  // Allocation out of VmaDeviceMemoryBlock.
6591  BlockAllocation m_BlockAllocation;
6592  // Allocation for an object that has its own private VkDeviceMemory.
6593  DedicatedAllocation m_DedicatedAllocation;
6594  };
6595 
6596 #if VMA_STATS_STRING_ENABLED
6597  uint32_t m_CreationFrameIndex;
6598  uint32_t m_BufferImageUsage; // 0 if unknown.
6599 #endif
6600 
6601  void FreeUserDataString(VmaAllocator hAllocator);
6602 
6603  friend struct VmaDedicatedAllocationListItemTraits;
6604 };
6605 
6606 struct VmaDedicatedAllocationListItemTraits
6607 {
6608  typedef VmaAllocation_T ItemType;
6609  static ItemType* GetPrev(const ItemType* item)
6610  {
6611  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6612  return item->m_DedicatedAllocation.m_Prev;
6613  }
6614  static ItemType* GetNext(const ItemType* item)
6615  {
6616  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6617  return item->m_DedicatedAllocation.m_Next;
6618  }
6619  static ItemType*& AccessPrev(ItemType* item)
6620  {
6621  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6622  return item->m_DedicatedAllocation.m_Prev;
6623  }
6624  static ItemType*& AccessNext(ItemType* item){
6625  VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
6626  return item->m_DedicatedAllocation.m_Next;
6627  }
6628 };
6629 
6630 /*
6631 Represents a region of a VmaDeviceMemoryBlock that is either assigned to an
6632 allocation and returned as allocated memory, or free.
6633 */
6634 struct VmaSuballocation
6635 {
6636  VkDeviceSize offset;
6637  VkDeviceSize size;
6638  VmaAllocation hAllocation;
6639  VmaSuballocationType type;
6640 };
6641 
6642 // Comparator for offsets.
6643 struct VmaSuballocationOffsetLess
6644 {
6645  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6646  {
6647  return lhs.offset < rhs.offset;
6648  }
6649 };
6650 struct VmaSuballocationOffsetGreater
6651 {
6652  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6653  {
6654  return lhs.offset > rhs.offset;
6655  }
6656 };
6657 
6658 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6659 
6660 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
6661 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6662 
6663 enum class VmaAllocationRequestType
6664 {
6665  Normal,
6666  // Used by "Linear" algorithm.
6667  UpperAddress,
6668  EndOf1st,
6669  EndOf2nd,
6670 };
6671 
6672 /*
6673 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6674 
6675 If canMakeOtherLost was false:
6676 - item points to a FREE suballocation.
6677 - itemsToMakeLostCount is 0.
6678 
6679 If canMakeOtherLost was true:
6680 - item points to first of sequence of suballocations, which are either FREE,
6681  or point to VmaAllocations that can become lost.
6682 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6683  the requested allocation to succeed.
6684 */
6685 struct VmaAllocationRequest
6686 {
6687  VkDeviceSize offset;
6688  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6689  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6690  VmaSuballocationList::iterator item;
6691  size_t itemsToMakeLostCount;
6692  void* customData;
6693  VmaAllocationRequestType type;
6694 
6695  VkDeviceSize CalcCost() const
6696  {
6697  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6698  }
6699 };
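/* Worked example of CalcCost() (hypothetical numbers): if satisfying a request
requires making 2 allocations lost whose overlapping sizes sum to 1048576 bytes,
the cost is 1048576 + 2 * VMA_LOST_ALLOCATION_COST = 3145728 byte-equivalents,
so a candidate that disturbs fewer or smaller live allocations wins. */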
6700 
6701 /*
6702 Data structure used for bookkeeping of allocations and unused ranges of memory
6703 in a single VkDeviceMemory block.
6704 */
6705 class VmaBlockMetadata
6706 {
6707 public:
6708  VmaBlockMetadata(VmaAllocator hAllocator);
6709  virtual ~VmaBlockMetadata() { }
6710  virtual void Init(VkDeviceSize size) { m_Size = size; }
6711 
6712  // Validates all data structures inside this object. If not valid, returns false.
6713  virtual bool Validate() const = 0;
6714  VkDeviceSize GetSize() const { return m_Size; }
6715  virtual size_t GetAllocationCount() const = 0;
6716  virtual VkDeviceSize GetSumFreeSize() const = 0;
6717  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6718  // Returns true if this block is empty - contains only a single free suballocation.
6719  virtual bool IsEmpty() const = 0;
6720 
6721  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6722  // Shouldn't modify blockCount.
6723  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6724 
6725 #if VMA_STATS_STRING_ENABLED
6726  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6727 #endif
6728 
6729  // Tries to find a place for suballocation with given parameters inside this block.
6730  // If succeeded, fills pAllocationRequest and returns true.
6731  // If failed, returns false.
6732  virtual bool CreateAllocationRequest(
6733  uint32_t currentFrameIndex,
6734  uint32_t frameInUseCount,
6735  VkDeviceSize bufferImageGranularity,
6736  VkDeviceSize allocSize,
6737  VkDeviceSize allocAlignment,
6738  bool upperAddress,
6739  VmaSuballocationType allocType,
6740  bool canMakeOtherLost,
6741  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6742  uint32_t strategy,
6743  VmaAllocationRequest* pAllocationRequest) = 0;
6744 
6745  virtual bool MakeRequestedAllocationsLost(
6746  uint32_t currentFrameIndex,
6747  uint32_t frameInUseCount,
6748  VmaAllocationRequest* pAllocationRequest) = 0;
6749 
6750  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6751 
6752  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6753 
6754  // Makes actual allocation based on request. Request must already be checked and valid.
6755  virtual void Alloc(
6756  const VmaAllocationRequest& request,
6757  VmaSuballocationType type,
6758  VkDeviceSize allocSize,
6759  VmaAllocation hAllocation) = 0;
6760 
6761  // Frees suballocation assigned to given memory region.
6762  virtual void Free(const VmaAllocation allocation) = 0;
6763  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6764 
6765 protected:
6766  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6767 
6768 #if VMA_STATS_STRING_ENABLED
6769  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6770  VkDeviceSize unusedBytes,
6771  size_t allocationCount,
6772  size_t unusedRangeCount) const;
6773  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6774  VkDeviceSize offset,
6775  VmaAllocation hAllocation) const;
6776  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6777  VkDeviceSize offset,
6778  VkDeviceSize size) const;
6779  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6780 #endif
6781 
6782 private:
6783  VkDeviceSize m_Size;
6784  const VkAllocationCallbacks* m_pAllocationCallbacks;
6785 };
6786 
6787 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6788  VMA_ASSERT(0 && "Validation failed: " #cond); \
6789  return false; \
6790  } } while(false)
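/* Illustrative sketch: VMA_VALIDATE asserts and returns false in one step, so a
Validate() override reads as a flat list of invariants (hypothetical body):

    virtual bool Validate() const
    {
        VMA_VALIDATE(m_SumFreeSize <= GetSize());
        VMA_VALIDATE(m_FreeCount <= m_Suballocations.size());
        return true;
    }
*/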
6791 
6792 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6793 {
6794  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6795 public:
6796  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6797  virtual ~VmaBlockMetadata_Generic();
6798  virtual void Init(VkDeviceSize size);
6799 
6800  virtual bool Validate() const;
6801  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6802  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6803  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6804  virtual bool IsEmpty() const;
6805 
6806  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6807  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6808 
6809 #if VMA_STATS_STRING_ENABLED
6810  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6811 #endif
6812 
6813  virtual bool CreateAllocationRequest(
6814  uint32_t currentFrameIndex,
6815  uint32_t frameInUseCount,
6816  VkDeviceSize bufferImageGranularity,
6817  VkDeviceSize allocSize,
6818  VkDeviceSize allocAlignment,
6819  bool upperAddress,
6820  VmaSuballocationType allocType,
6821  bool canMakeOtherLost,
6822  uint32_t strategy,
6823  VmaAllocationRequest* pAllocationRequest);
6824 
6825  virtual bool MakeRequestedAllocationsLost(
6826  uint32_t currentFrameIndex,
6827  uint32_t frameInUseCount,
6828  VmaAllocationRequest* pAllocationRequest);
6829 
6830  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6831 
6832  virtual VkResult CheckCorruption(const void* pBlockData);
6833 
6834  virtual void Alloc(
6835  const VmaAllocationRequest& request,
6836  VmaSuballocationType type,
6837  VkDeviceSize allocSize,
6838  VmaAllocation hAllocation);
6839 
6840  virtual void Free(const VmaAllocation allocation);
6841  virtual void FreeAtOffset(VkDeviceSize offset);
6842 
6843  ////////////////////////////////////////////////////////////////////////////////
6844  // For defragmentation
6845 
6846  bool IsBufferImageGranularityConflictPossible(
6847  VkDeviceSize bufferImageGranularity,
6848  VmaSuballocationType& inOutPrevSuballocType) const;
6849 
6850 private:
6851  friend class VmaDefragmentationAlgorithm_Generic;
6852  friend class VmaDefragmentationAlgorithm_Fast;
6853 
6854  uint32_t m_FreeCount;
6855  VkDeviceSize m_SumFreeSize;
6856  VmaSuballocationList m_Suballocations;
6857  // Suballocations that are free and have size greater than a certain threshold.
6858  // Sorted by size, ascending.
6859  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6860 
6861  bool ValidateFreeSuballocationList() const;
6862 
6863  // Checks if the requested suballocation with given parameters can be placed in the given suballocItem.
6864  // If yes, fills pOffset and returns true. If no, returns false.
6865  bool CheckAllocation(
6866  uint32_t currentFrameIndex,
6867  uint32_t frameInUseCount,
6868  VkDeviceSize bufferImageGranularity,
6869  VkDeviceSize allocSize,
6870  VkDeviceSize allocAlignment,
6871  VmaSuballocationType allocType,
6872  VmaSuballocationList::const_iterator suballocItem,
6873  bool canMakeOtherLost,
6874  VkDeviceSize* pOffset,
6875  size_t* itemsToMakeLostCount,
6876  VkDeviceSize* pSumFreeSize,
6877  VkDeviceSize* pSumItemSize) const;
6878  // Merges the given free suballocation with the following one, which must also be free.
6879  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6880  // Releases given suballocation, making it free.
6881  // Merges it with adjacent free suballocations if applicable.
6882  // Returns iterator to new free suballocation at this place.
6883  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6884  // Given a free suballocation, inserts it into the sorted list
6885  // m_FreeSuballocationsBySize if it qualifies.
6886  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6887  // Given a free suballocation, removes it from the sorted list
6888  // m_FreeSuballocationsBySize if it qualifies.
6889  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6890 };
6891 
6892 /*
6893 Allocations and their references in internal data structure look like this:
6894 
6895 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6896 
6897  0 +-------+
6898  | |
6899  | |
6900  | |
6901  +-------+
6902  | Alloc | 1st[m_1stNullItemsBeginCount]
6903  +-------+
6904  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6905  +-------+
6906  | ... |
6907  +-------+
6908  | Alloc | 1st[1st.size() - 1]
6909  +-------+
6910  | |
6911  | |
6912  | |
6913 GetSize() +-------+
6914 
6915 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6916 
6917  0 +-------+
6918  | Alloc | 2nd[0]
6919  +-------+
6920  | Alloc | 2nd[1]
6921  +-------+
6922  | ... |
6923  +-------+
6924  | Alloc | 2nd[2nd.size() - 1]
6925  +-------+
6926  | |
6927  | |
6928  | |
6929  +-------+
6930  | Alloc | 1st[m_1stNullItemsBeginCount]
6931  +-------+
6932  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6933  +-------+
6934  | ... |
6935  +-------+
6936  | Alloc | 1st[1st.size() - 1]
6937  +-------+
6938  | |
6939 GetSize() +-------+
6940 
6941 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6942 
6943  0 +-------+
6944  | |
6945  | |
6946  | |
6947  +-------+
6948  | Alloc | 1st[m_1stNullItemsBeginCount]
6949  +-------+
6950  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6951  +-------+
6952  | ... |
6953  +-------+
6954  | Alloc | 1st[1st.size() - 1]
6955  +-------+
6956  | |
6957  | |
6958  | |
6959  +-------+
6960  | Alloc | 2nd[2nd.size() - 1]
6961  +-------+
6962  | ... |
6963  +-------+
6964  | Alloc | 2nd[1]
6965  +-------+
6966  | Alloc | 2nd[0]
6967 GetSize() +-------+
6968 
6969 */
6970 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6971 {
6972  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6973 public:
6974  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6975  virtual ~VmaBlockMetadata_Linear();
6976  virtual void Init(VkDeviceSize size);
6977 
6978  virtual bool Validate() const;
6979  virtual size_t GetAllocationCount() const;
6980  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6981  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6982  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6983 
6984  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6985  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6986 
6987 #if VMA_STATS_STRING_ENABLED
6988  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6989 #endif
6990 
6991  virtual bool CreateAllocationRequest(
6992  uint32_t currentFrameIndex,
6993  uint32_t frameInUseCount,
6994  VkDeviceSize bufferImageGranularity,
6995  VkDeviceSize allocSize,
6996  VkDeviceSize allocAlignment,
6997  bool upperAddress,
6998  VmaSuballocationType allocType,
6999  bool canMakeOtherLost,
7000  uint32_t strategy,
7001  VmaAllocationRequest* pAllocationRequest);
7002 
7003  virtual bool MakeRequestedAllocationsLost(
7004  uint32_t currentFrameIndex,
7005  uint32_t frameInUseCount,
7006  VmaAllocationRequest* pAllocationRequest);
7007 
7008  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
7009 
7010  virtual VkResult CheckCorruption(const void* pBlockData);
7011 
7012  virtual void Alloc(
7013  const VmaAllocationRequest& request,
7014  VmaSuballocationType type,
7015  VkDeviceSize allocSize,
7016  VmaAllocation hAllocation);
7017 
7018  virtual void Free(const VmaAllocation allocation);
7019  virtual void FreeAtOffset(VkDeviceSize offset);
7020 
7021 private:
7022  /*
7023  There are two suballocation vectors, used in a ping-pong way.
7024  The one with index m_1stVectorIndex is called 1st.
7025  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
7026  2nd can be non-empty only when 1st is not empty.
7027  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
7028  */
7029  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
7030 
7031  enum SECOND_VECTOR_MODE
7032  {
7033  SECOND_VECTOR_EMPTY,
7034  /*
7035  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
7036  all have smaller offsets.
7037  */
7038  SECOND_VECTOR_RING_BUFFER,
7039  /*
7040  Suballocations in the 2nd vector form the upper side of a double stack.
7041  They all have offsets higher than those in the 1st vector.
7042  The top of this stack corresponds to smaller offsets, but higher indices in this vector.
7043  */
7044  SECOND_VECTOR_DOUBLE_STACK,
7045  };
7046 
7047  VkDeviceSize m_SumFreeSize;
7048  SuballocationVectorType m_Suballocations0, m_Suballocations1;
7049  uint32_t m_1stVectorIndex;
7050  SECOND_VECTOR_MODE m_2ndVectorMode;
7051 
7052  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7053  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7054  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
7055  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
7056 
7057  // Number of items in 1st vector with hAllocation = null at the beginning.
7058  size_t m_1stNullItemsBeginCount;
7059  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
7060  size_t m_1stNullItemsMiddleCount;
7061  // Number of items in 2nd vector with hAllocation = null.
7062  size_t m_2ndNullItemsCount;
7063 
7064  bool ShouldCompact1st() const;
7065  void CleanupAfterFree();
7066 
7067  bool CreateAllocationRequest_LowerAddress(
7068  uint32_t currentFrameIndex,
7069  uint32_t frameInUseCount,
7070  VkDeviceSize bufferImageGranularity,
7071  VkDeviceSize allocSize,
7072  VkDeviceSize allocAlignment,
7073  VmaSuballocationType allocType,
7074  bool canMakeOtherLost,
7075  uint32_t strategy,
7076  VmaAllocationRequest* pAllocationRequest);
7077  bool CreateAllocationRequest_UpperAddress(
7078  uint32_t currentFrameIndex,
7079  uint32_t frameInUseCount,
7080  VkDeviceSize bufferImageGranularity,
7081  VkDeviceSize allocSize,
7082  VkDeviceSize allocAlignment,
7083  VmaSuballocationType allocType,
7084  bool canMakeOtherLost,
7085  uint32_t strategy,
7086  VmaAllocationRequest* pAllocationRequest);
7087 };
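/*
VmaBlockMetadata_Linear backs custom pools created with the public
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. A hedged usage sketch of that API
(memoryTypeIndex is assumed to have been chosen earlier, e.g. with
vmaFindMemoryTypeIndex):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memoryTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // one fixed 64 MiB block
    poolCreateInfo.maxBlockCount = 1;               // linear pools are limited to a single block

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

Allocations from such a pool then go through the 1st/2nd vector machinery
pictured above: freeing in LIFO order gives a stack, mixed frees give a ring
buffer, and VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT selects the upper side of
the double stack.
*/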
7088 
7089 /*
7090 - GetSize() is the original size of the allocated memory block.
7091 - m_UsableSize is this size aligned down to a power of two.
7092  All allocations and calculations happen relative to m_UsableSize.
7093 - GetUnusableSize() is the difference between them.
7094  It is reported as a separate, unused range, not available for allocations.
7095 
7096 The node at level 0 has size = m_UsableSize.
7097 Each subsequent level contains nodes half the size of those on the previous level.
7098 m_LevelCount is the maximum number of levels to use in the current object.
7099 */
7100 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
7101 {
7102  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
7103 public:
7104  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
7105  virtual ~VmaBlockMetadata_Buddy();
7106  virtual void Init(VkDeviceSize size);
7107 
7108  virtual bool Validate() const;
7109  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
7110  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
7111  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
7112  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
7113 
7114  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
7115  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
7116 
7117 #if VMA_STATS_STRING_ENABLED
7118  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
7119 #endif
7120 
7121  virtual bool CreateAllocationRequest(
7122  uint32_t currentFrameIndex,
7123  uint32_t frameInUseCount,
7124  VkDeviceSize bufferImageGranularity,
7125  VkDeviceSize allocSize,
7126  VkDeviceSize allocAlignment,
7127  bool upperAddress,
7128  VmaSuballocationType allocType,
7129  bool canMakeOtherLost,
7130  uint32_t strategy,
7131  VmaAllocationRequest* pAllocationRequest);
7132 
7133  virtual bool MakeRequestedAllocationsLost(
7134  uint32_t currentFrameIndex,
7135  uint32_t frameInUseCount,
7136  VmaAllocationRequest* pAllocationRequest);
7137 
7138  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
7139 
7140  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
7141 
7142  virtual void Alloc(
7143  const VmaAllocationRequest& request,
7144  VmaSuballocationType type,
7145  VkDeviceSize allocSize,
7146  VmaAllocation hAllocation);
7147 
7148  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
7149  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
7150 
7151 private:
7152  static const VkDeviceSize MIN_NODE_SIZE = 32;
7153  static const size_t MAX_LEVELS = 30;
7154 
7155  struct ValidationContext
7156  {
7157  size_t calculatedAllocationCount;
7158  size_t calculatedFreeCount;
7159  VkDeviceSize calculatedSumFreeSize;
7160 
7161  ValidationContext() :
7162  calculatedAllocationCount(0),
7163  calculatedFreeCount(0),
7164  calculatedSumFreeSize(0) { }
7165  };
7166 
7167  struct Node
7168  {
7169  VkDeviceSize offset;
7170  enum TYPE
7171  {
7172  TYPE_FREE,
7173  TYPE_ALLOCATION,
7174  TYPE_SPLIT,
7175  TYPE_COUNT
7176  } type;
7177  Node* parent;
7178  Node* buddy;
7179 
7180  union
7181  {
7182  struct
7183  {
7184  Node* prev;
7185  Node* next;
7186  } free;
7187  struct
7188  {
7189  VmaAllocation alloc;
7190  } allocation;
7191  struct
7192  {
7193  Node* leftChild;
7194  } split;
7195  };
7196  };
7197 
7198  // Size of the memory block aligned down to a power of two.
7199  VkDeviceSize m_UsableSize;
7200  uint32_t m_LevelCount;
7201 
7202  Node* m_Root;
7203  struct {
7204  Node* front;
7205  Node* back;
7206  } m_FreeList[MAX_LEVELS];
7207  // Number of nodes in the tree with type == TYPE_ALLOCATION.
7208  size_t m_AllocationCount;
7209  // Number of nodes in the tree with type == TYPE_FREE.
7210  size_t m_FreeCount;
7211  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
7212  VkDeviceSize m_SumFreeSize;
7213 
7214  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
7215  void DeleteNode(Node* node);
7216  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
7217  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
7218  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
7219  // Alloc passed just for validation. Can be null.
7220  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
7221  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
7222  // Adds node to the front of FreeList at given level.
7223  // node->type must be FREE.
7224  // node->free.prev, next can be undefined.
7225  void AddToFreeListFront(uint32_t level, Node* node);
7226  // Removes node from FreeList at given level.
7227  // node->type must be FREE.
7228  // node->free.prev, next stay untouched.
7229  void RemoveFromFreeList(uint32_t level, Node* node);
7230 
7231 #if VMA_STATS_STRING_ENABLED
7232  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
7233 #endif
7234 };
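/*
An illustrative sketch matching the declarations above (not the verbatim
implementation): the node size at a given level is m_UsableSize >> level, so
AllocSizeToLevel() amounts to descending while the next, half-sized level still
fits the request.

    uint32_t AllocSizeToLevelSketch(VkDeviceSize allocSize,
        VkDeviceSize usableSize, uint32_t levelCount)
    {
        uint32_t level = 0;
        VkDeviceSize nodeSize = usableSize;
        // Descend while the next (half-sized) level still fits the request.
        while(level + 1 < levelCount && (nodeSize >> 1) >= allocSize)
        {
            nodeSize >>= 1;
            ++level;
        }
        return level;
    }

For example, with m_UsableSize = 64 MiB, a 3 MiB request lands on the level
with 4 MiB nodes; the 1 MiB of padding is the internal fragmentation that the
comment on m_SumFreeSize above refers to.
*/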
7235 
7236 /*
7237 Represents a single block of device memory (`VkDeviceMemory`) with all the
7238 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
7239 
7240 Thread-safety: This class must be externally synchronized.
7241 */
7242 class VmaDeviceMemoryBlock
7243 {
7244  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
7245 public:
7246  VmaBlockMetadata* m_pMetadata;
7247 
7248  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
7249 
7250  ~VmaDeviceMemoryBlock()
7251  {
7252  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
7253  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
7254  }
7255 
7256  // Always call after construction.
7257  void Init(
7258  VmaAllocator hAllocator,
7259  VmaPool hParentPool,
7260  uint32_t newMemoryTypeIndex,
7261  VkDeviceMemory newMemory,
7262  VkDeviceSize newSize,
7263  uint32_t id,
7264  uint32_t algorithm);
7265  // Always call before destruction.
7266  void Destroy(VmaAllocator allocator);
7267 
7268  VmaPool GetParentPool() const { return m_hParentPool; }
7269  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
7270  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7271  uint32_t GetId() const { return m_Id; }
7272  void* GetMappedData() const { return m_pMappedData; }
7273 
7274  // Validates all data structures inside this object. If not valid, returns false.
7275  bool Validate() const;
7276 
7277  VkResult CheckCorruption(VmaAllocator hAllocator);
7278 
7279  // ppData can be null.
7280  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
7281  void Unmap(VmaAllocator hAllocator, uint32_t count);
7282 
7283  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
7284  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
7285 
7286  VkResult BindBufferMemory(
7287  const VmaAllocator hAllocator,
7288  const VmaAllocation hAllocation,
7289  VkDeviceSize allocationLocalOffset,
7290  VkBuffer hBuffer,
7291  const void* pNext);
7292  VkResult BindImageMemory(
7293  const VmaAllocator hAllocator,
7294  const VmaAllocation hAllocation,
7295  VkDeviceSize allocationLocalOffset,
7296  VkImage hImage,
7297  const void* pNext);
7298 
7299 private:
7300  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block doesn't belong to a custom pool.
7301  uint32_t m_MemoryTypeIndex;
7302  uint32_t m_Id;
7303  VkDeviceMemory m_hMemory;
7304 
7305  /*
7306  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
7307  Also protects m_MapCount, m_pMappedData.
7308  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
7309  */
7310  VMA_MUTEX m_Mutex;
7311  uint32_t m_MapCount;
7312  void* m_pMappedData;
7313 };
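/*
A simplified sketch of the reference-counted mapping that Map()/Unmap()
implement (behavior inferred from m_MapCount, m_pMappedData and the destructor
assert; not the verbatim source). The block keeps a single persistent
vkMapMemory mapping, so mapping several allocations in the same block is cheap.

    VkResult MapSketch(VkDevice device, uint32_t count, void** ppData)
    {
        // Called under m_Mutex.
        if(m_MapCount != 0)
        {
            m_MapCount += count; // Already mapped: just bump the reference count.
            if(ppData != VMA_NULL) *ppData = m_pMappedData;
            return VK_SUCCESS;
        }
        VkResult res = vkMapMemory(device, m_hMemory, 0, VK_WHOLE_SIZE, 0, &m_pMappedData);
        if(res == VK_SUCCESS)
        {
            m_MapCount = count;
            if(ppData != VMA_NULL) *ppData = m_pMappedData;
        }
        return res;
    }

Unmap() decrements the counter symmetrically and calls vkUnmapMemory only when
it reaches zero - which is why the destructor asserts m_MapCount == 0.
*/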
7314 
7315 struct VmaDefragmentationMove
7316 {
7317  size_t srcBlockIndex;
7318  size_t dstBlockIndex;
7319  VkDeviceSize srcOffset;
7320  VkDeviceSize dstOffset;
7321  VkDeviceSize size;
7322  VmaAllocation hAllocation;
7323  VmaDeviceMemoryBlock* pSrcBlock;
7324  VmaDeviceMemoryBlock* pDstBlock;
7325 };
7326 
7327 class VmaDefragmentationAlgorithm;
7328 
7329 /*
7330 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
7331 Vulkan memory type.
7332 
7333 Synchronized internally with a mutex.
7334 */
7335 struct VmaBlockVector
7336 {
7337  VMA_CLASS_NO_COPY(VmaBlockVector)
7338 public:
7339  VmaBlockVector(
7340  VmaAllocator hAllocator,
7341  VmaPool hParentPool,
7342  uint32_t memoryTypeIndex,
7343  VkDeviceSize preferredBlockSize,
7344  size_t minBlockCount,
7345  size_t maxBlockCount,
7346  VkDeviceSize bufferImageGranularity,
7347  uint32_t frameInUseCount,
7348  bool explicitBlockSize,
7349  uint32_t algorithm,
7350  float priority,
7351  VkDeviceSize minAllocationAlignment,
7352  void* pMemoryAllocateNext);
7353  ~VmaBlockVector();
7354 
7355  VkResult CreateMinBlocks();
7356 
7357  VmaAllocator GetAllocator() const { return m_hAllocator; }
7358  VmaPool GetParentPool() const { return m_hParentPool; }
7359  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
7360  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7361  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
7362  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
7363  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
7364  uint32_t GetAlgorithm() const { return m_Algorithm; }
7365 
7366  void GetPoolStats(VmaPoolStats* pStats);
7367 
7368  bool IsEmpty();
7369  bool IsCorruptionDetectionEnabled() const;
7370 
7371  VkResult Allocate(
7372  uint32_t currentFrameIndex,
7373  VkDeviceSize size,
7374  VkDeviceSize alignment,
7375  const VmaAllocationCreateInfo& createInfo,
7376  VmaSuballocationType suballocType,
7377  size_t allocationCount,
7378  VmaAllocation* pAllocations);
7379 
7380  void Free(const VmaAllocation hAllocation);
7381 
7382  // Adds statistics of this BlockVector to pStats.
7383  void AddStats(VmaStats* pStats);
7384 
7385 #if VMA_STATS_STRING_ENABLED
7386  void PrintDetailedMap(class VmaJsonWriter& json);
7387 #endif
7388 
7389  void MakePoolAllocationsLost(
7390  uint32_t currentFrameIndex,
7391  size_t* pLostAllocationCount);
7392  VkResult CheckCorruption();
7393 
7394  // Saves results in pCtx->res.
7395  void Defragment(
7396  class VmaBlockVectorDefragmentationContext* pCtx,
7397  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
7398  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
7399  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
7400  VkCommandBuffer commandBuffer);
7401  void DefragmentationEnd(
7402  class VmaBlockVectorDefragmentationContext* pCtx,
7403  uint32_t flags,
7404  VmaDefragmentationStats* pStats);
7405 
7406  uint32_t ProcessDefragmentations(
7407  class VmaBlockVectorDefragmentationContext *pCtx,
7408  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
7409 
7410  void CommitDefragmentations(
7411  class VmaBlockVectorDefragmentationContext *pCtx,
7412  VmaDefragmentationStats* pStats);
7413 
7415  // To be used only while m_Mutex is locked. Used during defragmentation.
7416 
7417  size_t GetBlockCount() const { return m_Blocks.size(); }
7418  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
7419  size_t CalcAllocationCount() const;
7420  bool IsBufferImageGranularityConflictPossible() const;
7421 
7422 private:
7423  friend class VmaDefragmentationAlgorithm_Generic;
7424 
7425  const VmaAllocator m_hAllocator;
7426  const VmaPool m_hParentPool;
7427  const uint32_t m_MemoryTypeIndex;
7428  const VkDeviceSize m_PreferredBlockSize;
7429  const size_t m_MinBlockCount;
7430  const size_t m_MaxBlockCount;
7431  const VkDeviceSize m_BufferImageGranularity;
7432  const uint32_t m_FrameInUseCount;
7433  const bool m_ExplicitBlockSize;
7434  const uint32_t m_Algorithm;
7435  const float m_Priority;
7436  const VkDeviceSize m_MinAllocationAlignment;
7437  void* const m_pMemoryAllocateNext;
7438  VMA_RW_MUTEX m_Mutex;
7439 
7440  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
7441  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
7442  bool m_HasEmptyBlock;
7443  // Incrementally sorted by sumFreeSize, ascending.
7444  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
7445  uint32_t m_NextBlockId;
7446 
7447  VkDeviceSize CalcMaxBlockSize() const;
7448 
7449  // Finds and removes the given block from the vector.
7450  void Remove(VmaDeviceMemoryBlock* pBlock);
7451 
7452  // Performs a single step in sorting m_Blocks. They may not be fully sorted
7453  // after this call.
7454  void IncrementallySortBlocks();
7455 
7456  VkResult AllocatePage(
7457  uint32_t currentFrameIndex,
7458  VkDeviceSize size,
7459  VkDeviceSize alignment,
7460  const VmaAllocationCreateInfo& createInfo,
7461  VmaSuballocationType suballocType,
7462  VmaAllocation* pAllocation);
7463 
7464  // To be used only without CAN_MAKE_OTHER_LOST flag.
7465  VkResult AllocateFromBlock(
7466  VmaDeviceMemoryBlock* pBlock,
7467  uint32_t currentFrameIndex,
7468  VkDeviceSize size,
7469  VkDeviceSize alignment,
7470  VmaAllocationCreateFlags allocFlags,
7471  void* pUserData,
7472  VmaSuballocationType suballocType,
7473  uint32_t strategy,
7474  VmaAllocation* pAllocation);
7475 
7476  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
7477 
7478  // Saves result to pCtx->res.
7479  void ApplyDefragmentationMovesCpu(
7480  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7481  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
7482  // Saves result to pCtx->res.
7483  void ApplyDefragmentationMovesGpu(
7484  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7485  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7486  VkCommandBuffer commandBuffer);
7487 
7488  /*
7489  Used during defragmentation. pDefragmentationStats is optional: it is an in/out
7490  parameter, updated with new data.
7491  */
7492  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7493 
7494  void UpdateHasEmptyBlock();
7495 };
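/*
A sketch of what "single step in sorting" plausibly means (inferred from the
comments on IncrementallySortBlocks() and m_Blocks; kept deliberately cheap so
it can run on every allocation): one bubble-sort pass that stops at the first
swap.

    void IncrementallySortBlocksSketch()
    {
        for(size_t i = 1; i < m_Blocks.size(); ++i)
        {
            if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
                m_Blocks[i]->m_pMetadata->GetSumFreeSize())
            {
                VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
                return; // At most one swap per call - hence "incrementally".
            }
        }
    }

Repeated calls converge to the "sorted by sumFreeSize, ascending" order that
m_Blocks documents, without ever paying for a full sort on the hot path.
*/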
7496 
7497 struct VmaPool_T
7498 {
7499  VMA_CLASS_NO_COPY(VmaPool_T)
7500 public:
7501  VmaBlockVector m_BlockVector;
7502 
7503  VmaPool_T(
7504  VmaAllocator hAllocator,
7505  const VmaPoolCreateInfo& createInfo,
7506  VkDeviceSize preferredBlockSize);
7507  ~VmaPool_T();
7508 
7509  uint32_t GetId() const { return m_Id; }
7510  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
7511 
7512  const char* GetName() const { return m_Name; }
7513  void SetName(const char* pName);
7514 
7515 #if VMA_STATS_STRING_ENABLED
7516  //void PrintDetailedMap(class VmaStringBuilder& sb);
7517 #endif
7518 
7519 private:
7520  uint32_t m_Id;
7521  char* m_Name;
7522  VmaPool_T* m_PrevPool = VMA_NULL;
7523  VmaPool_T* m_NextPool = VMA_NULL;
7524  friend struct VmaPoolListItemTraits;
7525 };
7526 
7527 struct VmaPoolListItemTraits
7528 {
7529  typedef VmaPool_T ItemType;
7530  static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
7531  static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
7532  static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
7533  static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
7534 };
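/*
VmaPoolListItemTraits is the adapter that lets the generic
VmaIntrusiveLinkedList (used by VmaAllocator_T below) store VmaPool_T objects
through their embedded m_PrevPool/m_NextPool pointers. A minimal sketch of how
such a list consumes the traits (illustrative, not the actual
VmaIntrusiveLinkedList code):

    template<typename ItemTypeTraits>
    struct IntrusiveListSketch
    {
        typedef typename ItemTypeTraits::ItemType ItemType;
        ItemType* m_Front = VMA_NULL;

        void PushFront(ItemType* item)
        {
            // Wire the new item in through the traits accessors.
            ItemTypeTraits::AccessPrev(item) = VMA_NULL;
            ItemTypeTraits::AccessNext(item) = m_Front;
            if(m_Front != VMA_NULL)
                ItemTypeTraits::AccessPrev(m_Front) = item;
            m_Front = item;
        }
    };

No per-node heap allocation is needed - the links live inside VmaPool_T itself,
which is why the traits struct is declared a friend of VmaPool_T.
*/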
7535 
7536 /*
7537 Performs defragmentation:
7538 
7539 - Updates `pBlockVector->m_pMetadata`.
7540 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7541 - Does not move actual data, only returns requested moves as `moves`.
7542 */
7543 class VmaDefragmentationAlgorithm
7544 {
7545  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7546 public:
7547  VmaDefragmentationAlgorithm(
7548  VmaAllocator hAllocator,
7549  VmaBlockVector* pBlockVector,
7550  uint32_t currentFrameIndex) :
7551  m_hAllocator(hAllocator),
7552  m_pBlockVector(pBlockVector),
7553  m_CurrentFrameIndex(currentFrameIndex)
7554  {
7555  }
7556  virtual ~VmaDefragmentationAlgorithm()
7557  {
7558  }
7559 
7560  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7561  virtual void AddAll() = 0;
7562 
7563  virtual VkResult Defragment(
7564  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7565  VkDeviceSize maxBytesToMove,
7566  uint32_t maxAllocationsToMove,
7567  VmaDefragmentationFlags flags) = 0;
7568 
7569  virtual VkDeviceSize GetBytesMoved() const = 0;
7570  virtual uint32_t GetAllocationsMoved() const = 0;
7571 
7572 protected:
7573  VmaAllocator const m_hAllocator;
7574  VmaBlockVector* const m_pBlockVector;
7575  const uint32_t m_CurrentFrameIndex;
7576 
7577  struct AllocationInfo
7578  {
7579  VmaAllocation m_hAllocation;
7580  VkBool32* m_pChanged;
7581 
7582  AllocationInfo() :
7583  m_hAllocation(VK_NULL_HANDLE),
7584  m_pChanged(VMA_NULL)
7585  {
7586  }
7587  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7588  m_hAllocation(hAlloc),
7589  m_pChanged(pChanged)
7590  {
7591  }
7592  };
7593 };
7594 
7595 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7596 {
7597  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7598 public:
7599  VmaDefragmentationAlgorithm_Generic(
7600  VmaAllocator hAllocator,
7601  VmaBlockVector* pBlockVector,
7602  uint32_t currentFrameIndex,
7603  bool overlappingMoveSupported);
7604  virtual ~VmaDefragmentationAlgorithm_Generic();
7605 
7606  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7607  virtual void AddAll() { m_AllAllocations = true; }
7608 
7609  virtual VkResult Defragment(
7610  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7611  VkDeviceSize maxBytesToMove,
7612  uint32_t maxAllocationsToMove,
7613  VmaDefragmentationFlags flags);
7614 
7615  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7616  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7617 
7618 private:
7619  uint32_t m_AllocationCount;
7620  bool m_AllAllocations;
7621 
7622  VkDeviceSize m_BytesMoved;
7623  uint32_t m_AllocationsMoved;
7624 
7625  struct AllocationInfoSizeGreater
7626  {
7627  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7628  {
7629  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7630  }
7631  };
7632 
7633  struct AllocationInfoOffsetGreater
7634  {
7635  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7636  {
7637  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7638  }
7639  };
7640 
7641  struct BlockInfo
7642  {
7643  size_t m_OriginalBlockIndex;
7644  VmaDeviceMemoryBlock* m_pBlock;
7645  bool m_HasNonMovableAllocations;
7646  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7647 
7648  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7649  m_OriginalBlockIndex(SIZE_MAX),
7650  m_pBlock(VMA_NULL),
7651  m_HasNonMovableAllocations(true),
7652  m_Allocations(pAllocationCallbacks)
7653  {
7654  }
7655 
7656  void CalcHasNonMovableAllocations()
7657  {
7658  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7659  const size_t defragmentAllocCount = m_Allocations.size();
7660  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7661  }
7662 
7663  void SortAllocationsBySizeDescending()
7664  {
7665  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7666  }
7667 
7668  void SortAllocationsByOffsetDescending()
7669  {
7670  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7671  }
7672  };
7673 
7674  struct BlockPointerLess
7675  {
7676  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7677  {
7678  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7679  }
7680  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7681  {
7682  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7683  }
7684  };
7685 
7686  // 1. Blocks with some non-movable allocations go first.
7687  // 2. Blocks with smaller sumFreeSize go first.
7688  struct BlockInfoCompareMoveDestination
7689  {
7690  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7691  {
7692  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7693  {
7694  return true;
7695  }
7696  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7697  {
7698  return false;
7699  }
7700  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7701  {
7702  return true;
7703  }
7704  return false;
7705  }
7706  };
7707 
7708  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7709  BlockInfoVector m_Blocks;
7710 
7711  VkResult DefragmentRound(
7712  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7713  VkDeviceSize maxBytesToMove,
7714  uint32_t maxAllocationsToMove,
7715  bool freeOldAllocations);
7716 
7717  size_t CalcBlocksWithNonMovableCount() const;
7718 
7719  static bool MoveMakesSense(
7720  size_t dstBlockIndex, VkDeviceSize dstOffset,
7721  size_t srcBlockIndex, VkDeviceSize srcOffset);
7722 };
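/*
A sketch of the MoveMakesSense() predicate declared above (inferred from its
parameters; illustrative): a move is worthwhile only if it transports the
allocation strictly "down" in (blockIndex, offset) lexicographic order, so
defragmentation always compacts toward the front and cannot oscillate.

    static bool MoveMakesSenseSketch(
        size_t dstBlockIndex, VkDeviceSize dstOffset,
        size_t srcBlockIndex, VkDeviceSize srcOffset)
    {
        if(dstBlockIndex != srcBlockIndex)
            return dstBlockIndex < srcBlockIndex;
        return dstOffset < srcOffset;
    }
*/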
7723 
7724 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7725 {
7726  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7727 public:
7728  VmaDefragmentationAlgorithm_Fast(
7729  VmaAllocator hAllocator,
7730  VmaBlockVector* pBlockVector,
7731  uint32_t currentFrameIndex,
7732  bool overlappingMoveSupported);
7733  virtual ~VmaDefragmentationAlgorithm_Fast();
7734 
7735  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7736  virtual void AddAll() { m_AllAllocations = true; }
7737 
7738  virtual VkResult Defragment(
7739  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7740  VkDeviceSize maxBytesToMove,
7741  uint32_t maxAllocationsToMove,
7742  VmaDefragmentationFlags flags);
7743 
7744  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7745  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7746 
7747 private:
7748  struct BlockInfo
7749  {
7750  size_t origBlockIndex;
7751  };
7752 
7753  class FreeSpaceDatabase
7754  {
7755  public:
7756  FreeSpaceDatabase()
7757  {
7758  FreeSpace s = {};
7759  s.blockInfoIndex = SIZE_MAX;
7760  for(size_t i = 0; i < MAX_COUNT; ++i)
7761  {
7762  m_FreeSpaces[i] = s;
7763  }
7764  }
7765 
7766  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7767  {
7768  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7769  {
7770  return;
7771  }
7772 
7773  // Find the first unused slot or, failing that, the smallest space that is smaller than the new one.
7774  size_t bestIndex = SIZE_MAX;
7775  for(size_t i = 0; i < MAX_COUNT; ++i)
7776  {
7777  // Empty structure.
7778  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7779  {
7780  bestIndex = i;
7781  break;
7782  }
7783  if(m_FreeSpaces[i].size < size &&
7784  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7785  {
7786  bestIndex = i;
7787  }
7788  }
7789 
7790  if(bestIndex != SIZE_MAX)
7791  {
7792  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7793  m_FreeSpaces[bestIndex].offset = offset;
7794  m_FreeSpaces[bestIndex].size = size;
7795  }
7796  }
7797 
7798  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7799  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7800  {
7801  size_t bestIndex = SIZE_MAX;
7802  VkDeviceSize bestFreeSpaceAfter = 0;
7803  for(size_t i = 0; i < MAX_COUNT; ++i)
7804  {
7805  // Structure is valid.
7806  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7807  {
7808  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7809  // Allocation fits into this structure.
7810  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7811  {
7812  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7813  (dstOffset + size);
7814  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7815  {
7816  bestIndex = i;
7817  bestFreeSpaceAfter = freeSpaceAfter;
7818  }
7819  }
7820  }
7821  }
7822 
7823  if(bestIndex != SIZE_MAX)
7824  {
7825  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7826  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7827 
7828  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7829  {
7830  // Leave this structure for remaining empty space.
7831  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7832  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7833  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7834  }
7835  else
7836  {
7837  // This structure becomes invalid.
7838  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7839  }
7840 
7841  return true;
7842  }
7843 
7844  return false;
7845  }
7846 
7847  private:
7848  static const size_t MAX_COUNT = 4;
7849 
7850  struct FreeSpace
7851  {
7852  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7853  VkDeviceSize offset;
7854  VkDeviceSize size;
7855  } m_FreeSpaces[MAX_COUNT];
7856  };
7857 
7858  const bool m_OverlappingMoveSupported;
7859 
7860  uint32_t m_AllocationCount;
7861  bool m_AllAllocations;
7862 
7863  VkDeviceSize m_BytesMoved;
7864  uint32_t m_AllocationsMoved;
7865 
7866  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7867 
7868  void PreprocessMetadata();
7869  void PostprocessMetadata();
7870  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7871 };
7872 
7873 struct VmaBlockDefragmentationContext
7874 {
7875  enum BLOCK_FLAG
7876  {
7877  BLOCK_FLAG_USED = 0x00000001,
7878  };
7879  uint32_t flags;
7880  VkBuffer hBuffer;
7881 };
7882 
7883 class VmaBlockVectorDefragmentationContext
7884 {
7885  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7886 public:
7887  VkResult res;
7888  bool mutexLocked;
7889  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7890  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7891  uint32_t defragmentationMovesProcessed;
7892  uint32_t defragmentationMovesCommitted;
7893  bool hasDefragmentationPlan;
7894 
7895  VmaBlockVectorDefragmentationContext(
7896  VmaAllocator hAllocator,
7897  VmaPool hCustomPool, // Optional.
7898  VmaBlockVector* pBlockVector,
7899  uint32_t currFrameIndex);
7900  ~VmaBlockVectorDefragmentationContext();
7901 
7902  VmaPool GetCustomPool() const { return m_hCustomPool; }
7903  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7904  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7905 
7906  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7907  void AddAll() { m_AllAllocations = true; }
7908 
7909  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7910 
7911 private:
7912  const VmaAllocator m_hAllocator;
7913  // Null if not from custom pool.
7914  const VmaPool m_hCustomPool;
7915  // Redundant, stored for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7916  VmaBlockVector* const m_pBlockVector;
7917  const uint32_t m_CurrFrameIndex;
7918  // Owner of this object.
7919  VmaDefragmentationAlgorithm* m_pAlgorithm;
7920 
7921  struct AllocInfo
7922  {
7923  VmaAllocation hAlloc;
7924  VkBool32* pChanged;
7925  };
7926  // Used between constructor and Begin.
7927  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7928  bool m_AllAllocations;
7929 };
7930 
7931 struct VmaDefragmentationContext_T
7932 {
7933 private:
7934  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7935 public:
7936  VmaDefragmentationContext_T(
7937  VmaAllocator hAllocator,
7938  uint32_t currFrameIndex,
7939  uint32_t flags,
7940  VmaDefragmentationStats* pStats);
7941  ~VmaDefragmentationContext_T();
7942 
7943  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7944  void AddAllocations(
7945  uint32_t allocationCount,
7946  const VmaAllocation* pAllocations,
7947  VkBool32* pAllocationsChanged);
7948 
7949  /*
7950  Returns:
7951  - `VK_SUCCESS` if it succeeded and the object can be destroyed immediately.
7952  - `VK_NOT_READY` if it succeeded but the object must remain alive until vmaDefragmentationEnd().
7953  - Negative value if an error occurred; the object can be destroyed immediately.
7954  */
7955  VkResult Defragment(
7956  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7957  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7958  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7959 
7960  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7961  VkResult DefragmentPassEnd();
7962 
7963 private:
7964  const VmaAllocator m_hAllocator;
7965  const uint32_t m_CurrFrameIndex;
7966  const uint32_t m_Flags;
7967  VmaDefragmentationStats* const m_pStats;
7968 
7969  VkDeviceSize m_MaxCpuBytesToMove;
7970  uint32_t m_MaxCpuAllocationsToMove;
7971  VkDeviceSize m_MaxGpuBytesToMove;
7972  uint32_t m_MaxGpuAllocationsToMove;
7973 
7974  // Owner of these objects.
7975  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7976  // Owner of these objects.
7977  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7978 };
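/*
The Defragment() return contract above mirrors the public API. A hedged usage
sketch (only a subset of VmaDefragmentationInfo2 fields is shown;
allocCount/allocations/allocationsChanged are assumed to be prepared by the
caller):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.pAllocationsChanged = allocationsChanged;

    VmaDefragmentationContext defragCtx;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
    // VK_SUCCESS: defragmentation finished on the CPU.
    // VK_NOT_READY: GPU copies were recorded into defragInfo.commandBuffer;
    //               submit it and wait before using the moved allocations.
    if(res == VK_SUCCESS || res == VK_NOT_READY)
    {
        vmaDefragmentationEnd(allocator, defragCtx); // Always pairs with Begin.
    }
*/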
7979 
7980 #if VMA_RECORDING_ENABLED
7981 
7982 class VmaRecorder
7983 {
7984 public:
7985  VmaRecorder();
7986  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7987  void WriteConfiguration(
7988  const VkPhysicalDeviceProperties& devProps,
7989  const VkPhysicalDeviceMemoryProperties& memProps,
7990  uint32_t vulkanApiVersion,
7991  bool dedicatedAllocationExtensionEnabled,
7992  bool bindMemory2ExtensionEnabled,
7993  bool memoryBudgetExtensionEnabled,
7994  bool deviceCoherentMemoryExtensionEnabled);
7995  ~VmaRecorder();
7996 
7997  void RecordCreateAllocator(uint32_t frameIndex);
7998  void RecordDestroyAllocator(uint32_t frameIndex);
7999  void RecordCreatePool(uint32_t frameIndex,
8000  const VmaPoolCreateInfo& createInfo,
8001  VmaPool pool);
8002  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
8003  void RecordAllocateMemory(uint32_t frameIndex,
8004  const VkMemoryRequirements& vkMemReq,
8005  const VmaAllocationCreateInfo& createInfo,
8006  VmaAllocation allocation);
8007  void RecordAllocateMemoryPages(uint32_t frameIndex,
8008  const VkMemoryRequirements& vkMemReq,
8009  const VmaAllocationCreateInfo& createInfo,
8010  uint64_t allocationCount,
8011  const VmaAllocation* pAllocations);
8012  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
8013  const VkMemoryRequirements& vkMemReq,
8014  bool requiresDedicatedAllocation,
8015  bool prefersDedicatedAllocation,
8016  const VmaAllocationCreateInfo& createInfo,
8017  VmaAllocation allocation);
8018  void RecordAllocateMemoryForImage(uint32_t frameIndex,
8019  const VkMemoryRequirements& vkMemReq,
8020  bool requiresDedicatedAllocation,
8021  bool prefersDedicatedAllocation,
8022  const VmaAllocationCreateInfo& createInfo,
8023  VmaAllocation allocation);
8024  void RecordFreeMemory(uint32_t frameIndex,
8025  VmaAllocation allocation);
8026  void RecordFreeMemoryPages(uint32_t frameIndex,
8027  uint64_t allocationCount,
8028  const VmaAllocation* pAllocations);
8029  void RecordSetAllocationUserData(uint32_t frameIndex,
8030  VmaAllocation allocation,
8031  const void* pUserData);
8032  void RecordCreateLostAllocation(uint32_t frameIndex,
8033  VmaAllocation allocation);
8034  void RecordMapMemory(uint32_t frameIndex,
8035  VmaAllocation allocation);
8036  void RecordUnmapMemory(uint32_t frameIndex,
8037  VmaAllocation allocation);
8038  void RecordFlushAllocation(uint32_t frameIndex,
8039  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
8040  void RecordInvalidateAllocation(uint32_t frameIndex,
8041  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
8042  void RecordCreateBuffer(uint32_t frameIndex,
8043  const VkBufferCreateInfo& bufCreateInfo,
8044  const VmaAllocationCreateInfo& allocCreateInfo,
8045  VmaAllocation allocation);
8046  void RecordCreateImage(uint32_t frameIndex,
8047  const VkImageCreateInfo& imageCreateInfo,
8048  const VmaAllocationCreateInfo& allocCreateInfo,
8049  VmaAllocation allocation);
8050  void RecordDestroyBuffer(uint32_t frameIndex,
8051  VmaAllocation allocation);
8052  void RecordDestroyImage(uint32_t frameIndex,
8053  VmaAllocation allocation);
8054  void RecordTouchAllocation(uint32_t frameIndex,
8055  VmaAllocation allocation);
8056  void RecordGetAllocationInfo(uint32_t frameIndex,
8057  VmaAllocation allocation);
8058  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
8059  VmaPool pool);
8060  void RecordDefragmentationBegin(uint32_t frameIndex,
8061  const VmaDefragmentationInfo2& info,
8062  VmaDefragmentationContext ctx);
8063  void RecordDefragmentationEnd(uint32_t frameIndex,
8064  VmaDefragmentationContext ctx);
8065  void RecordSetPoolName(uint32_t frameIndex,
8066  VmaPool pool,
8067  const char* name);
8068 
8069 private:
8070  struct CallParams
8071  {
8072  uint32_t threadId;
8073  double time;
8074  };
8075 
8076  class UserDataString
8077  {
8078  public:
8079  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
8080  const char* GetString() const { return m_Str; }
8081 
8082  private:
8083  char m_PtrStr[17];
8084  const char* m_Str;
8085  };
8086 
8087  bool m_UseMutex;
8088  VmaRecordFlags m_Flags;
8089  FILE* m_File;
8090  VMA_MUTEX m_FileMutex;
8091  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
8092 
8093  void GetBasicParams(CallParams& outParams);
8094 
8095  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
8096  template<typename T>
8097  void PrintPointerList(uint64_t count, const T* pItems)
8098  {
8099  if(count)
8100  {
8101  fprintf(m_File, "%p", pItems[0]);
8102  for(uint64_t i = 1; i < count; ++i)
8103  {
8104  fprintf(m_File, " %p", pItems[i]);
8105  }
8106  }
8107  }
8108 
8109  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
8110  void Flush();
8111 };
8112 
8113 #endif // #if VMA_RECORDING_ENABLED
8114 
8115 /*
8116 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
8117 */
8118 class VmaAllocationObjectAllocator
8119 {
8120  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
8121 public:
8122  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
8123 
8124  template<typename... Types> VmaAllocation Allocate(Types... args);
8125  void Free(VmaAllocation hAlloc);
8126 
8127 private:
8128  VMA_MUTEX m_Mutex;
8129  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
8130 };
8131 
8132 struct VmaCurrentBudgetData
8133 {
8134  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
8135  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
8136 
8137 #if VMA_MEMORY_BUDGET
8138  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
8139  VMA_RW_MUTEX m_BudgetMutex;
8140  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
8141  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
8142  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
8143 #endif // #if VMA_MEMORY_BUDGET
8144 
8145  VmaCurrentBudgetData()
8146  {
8147  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
8148  {
8149  m_BlockBytes[heapIndex] = 0;
8150  m_AllocationBytes[heapIndex] = 0;
8151 #if VMA_MEMORY_BUDGET
8152  m_VulkanUsage[heapIndex] = 0;
8153  m_VulkanBudget[heapIndex] = 0;
8154  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
8155 #endif
8156  }
8157 
8158 #if VMA_MEMORY_BUDGET
8159  m_OperationsSinceBudgetFetch = 0;
8160 #endif
8161  }
8162 
8163  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
8164  {
8165  m_AllocationBytes[heapIndex] += allocationSize;
8166 #if VMA_MEMORY_BUDGET
8167  ++m_OperationsSinceBudgetFetch;
8168 #endif
8169  }
8170 
8171  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
8172  {
8173  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
8174  m_AllocationBytes[heapIndex] -= allocationSize;
8175 #if VMA_MEMORY_BUDGET
8176  ++m_OperationsSinceBudgetFetch;
8177 #endif
8178  }
8179 };
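/*
A sketch of how these counters plausibly combine (inferred from the member
names; locking and the actual refresh policy omitted): the
VK_EXT_memory_budget numbers are fetched only occasionally, so the current
usage of a heap is estimated by adjusting the fetched value with the block
bytes allocated or freed since the fetch.

    uint64_t EstimateHeapUsageSketch(uint32_t heapIndex) const
    {
        const uint64_t blockBytesNow  = m_BlockBytes[heapIndex];
        const uint64_t blockBytesThen = m_BlockBytesAtBudgetFetch[heapIndex];
        // Guard against underflow when blocks were freed since the fetch.
        if(m_VulkanUsage[heapIndex] + blockBytesNow > blockBytesThen)
            return m_VulkanUsage[heapIndex] + blockBytesNow - blockBytesThen;
        return 0;
    }

m_OperationsSinceBudgetFetch is what triggers a re-fetch once enough
allocations and frees have accumulated.
*/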
8180 
8181 // Main allocator object.
8182 struct VmaAllocator_T
8183 {
8184  VMA_CLASS_NO_COPY(VmaAllocator_T)
8185 public:
8186  bool m_UseMutex;
8187  uint32_t m_VulkanApiVersion;
8188  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
8189  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
8190  bool m_UseExtMemoryBudget;
8191  bool m_UseAmdDeviceCoherentMemory;
8192  bool m_UseKhrBufferDeviceAddress;
8193  bool m_UseExtMemoryPriority;
8194  VkDevice m_hDevice;
8195  VkInstance m_hInstance;
8196  bool m_AllocationCallbacksSpecified;
8197  VkAllocationCallbacks m_AllocationCallbacks;
8198  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
8199  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
8200 
8201  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the limit can be allocated from it.
8202  uint32_t m_HeapSizeLimitMask;
8203 
8204  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
8205  VkPhysicalDeviceMemoryProperties m_MemProps;
8206 
8207  // Default pools.
8208  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
8209 
8210  typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
8211  DedicatedAllocationLinkedList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
8212  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
8213 
8214  VmaCurrentBudgetData m_Budget;
8215  VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
8216 
8217  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
8218  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
8219  ~VmaAllocator_T();
8220 
8221  const VkAllocationCallbacks* GetAllocationCallbacks() const
8222  {
8223  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
8224  }
8225  const VmaVulkanFunctions& GetVulkanFunctions() const
8226  {
8227  return m_VulkanFunctions;
8228  }
8229 
8230  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
8231 
8232  VkDeviceSize GetBufferImageGranularity() const
8233  {
8234  return VMA_MAX(
8235  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
8236  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
8237  }
8238 
8239  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
8240  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
8241 
8242  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
8243  {
8244  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
8245  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
8246  }
8247  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
8248  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
8249  {
8250  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
8251  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
8252  }
8253  // Minimum alignment for all allocations in specific memory type.
8254  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
8255  {
8256  return IsMemoryTypeNonCoherent(memTypeIndex) ?
8257  VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
8258  (VkDeviceSize)VMA_MIN_ALIGNMENT;
8259  }
8260 
8261  bool IsIntegratedGpu() const
8262  {
8263  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
8264  }
8265 
8266  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
8267 
8268 #if VMA_RECORDING_ENABLED
8269  VmaRecorder* GetRecorder() const { return m_pRecorder; }
8270 #endif
8271 
8272  void GetBufferMemoryRequirements(
8273  VkBuffer hBuffer,
8274  VkMemoryRequirements& memReq,
8275  bool& requiresDedicatedAllocation,
8276  bool& prefersDedicatedAllocation) const;
8277  void GetImageMemoryRequirements(
8278  VkImage hImage,
8279  VkMemoryRequirements& memReq,
8280  bool& requiresDedicatedAllocation,
8281  bool& prefersDedicatedAllocation) const;
8282 
8283  // Main allocation function.
8284  VkResult AllocateMemory(
8285  const VkMemoryRequirements& vkMemReq,
8286  bool requiresDedicatedAllocation,
8287  bool prefersDedicatedAllocation,
8288  VkBuffer dedicatedBuffer,
8289  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
8290  VkImage dedicatedImage,
8291  const VmaAllocationCreateInfo& createInfo,
8292  VmaSuballocationType suballocType,
8293  size_t allocationCount,
8294  VmaAllocation* pAllocations);
8295 
8296  // Main deallocation function.
8297  void FreeMemory(
8298  size_t allocationCount,
8299  const VmaAllocation* pAllocations);
8300 
8301  void CalculateStats(VmaStats* pStats);
8302 
8303  void GetBudget(
8304  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
8305 
8306 #if VMA_STATS_STRING_ENABLED
8307  void PrintDetailedMap(class VmaJsonWriter& json);
8308 #endif
8309 
8310  VkResult DefragmentationBegin(
8311  const VmaDefragmentationInfo2& info,
8312  VmaDefragmentationStats* pStats,
8313  VmaDefragmentationContext* pContext);
8314  VkResult DefragmentationEnd(
8315  VmaDefragmentationContext context);
8316 
8317  VkResult DefragmentationPassBegin(
8318  VmaDefragmentationPassInfo* pInfo,
8319  VmaDefragmentationContext context);
8320  VkResult DefragmentationPassEnd(
8321  VmaDefragmentationContext context);
8322 
8323  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
8324  bool TouchAllocation(VmaAllocation hAllocation);
8325 
8326  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
8327  void DestroyPool(VmaPool pool);
8328  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
8329 
8330  void SetCurrentFrameIndex(uint32_t frameIndex);
8331  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
8332 
8333  void MakePoolAllocationsLost(
8334  VmaPool hPool,
8335  size_t* pLostAllocationCount);
8336  VkResult CheckPoolCorruption(VmaPool hPool);
8337  VkResult CheckCorruption(uint32_t memoryTypeBits);
8338 
8339  void CreateLostAllocation(VmaAllocation* pAllocation);
8340 
8341  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
8342  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
8343  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
8344  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
8345  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
8346  VkResult BindVulkanBuffer(
8347  VkDeviceMemory memory,
8348  VkDeviceSize memoryOffset,
8349  VkBuffer buffer,
8350  const void* pNext);
8351  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
8352  VkResult BindVulkanImage(
8353  VkDeviceMemory memory,
8354  VkDeviceSize memoryOffset,
8355  VkImage image,
8356  const void* pNext);
8357 
8358  VkResult Map(VmaAllocation hAllocation, void** ppData);
8359  void Unmap(VmaAllocation hAllocation);
8360 
8361  VkResult BindBufferMemory(
8362  VmaAllocation hAllocation,
8363  VkDeviceSize allocationLocalOffset,
8364  VkBuffer hBuffer,
8365  const void* pNext);
8366  VkResult BindImageMemory(
8367  VmaAllocation hAllocation,
8368  VkDeviceSize allocationLocalOffset,
8369  VkImage hImage,
8370  const void* pNext);
8371 
8372  VkResult FlushOrInvalidateAllocation(
8373  VmaAllocation hAllocation,
8374  VkDeviceSize offset, VkDeviceSize size,
8375  VMA_CACHE_OPERATION op);
8376  VkResult FlushOrInvalidateAllocations(
8377  uint32_t allocationCount,
8378  const VmaAllocation* allocations,
8379  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
8380  VMA_CACHE_OPERATION op);
8381 
8382  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
8383 
8384  /*
8385  Returns a bit mask of memory types that can support defragmentation on the GPU,
8386  as they support creation of the buffer required for copy operations.
8387  */
8388  uint32_t GetGpuDefragmentationMemoryTypeBits();
8389 
8390 
8391 private:
8392  VkDeviceSize m_PreferredLargeHeapBlockSize;
8393 
8394  VkPhysicalDevice m_PhysicalDevice;
8395  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
8396  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
8397 
8398  VMA_RW_MUTEX m_PoolsMutex;
8399  typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
8400  // Protected by m_PoolsMutex.
8401  PoolList m_Pools;
8402  uint32_t m_NextPoolId;
8403 
8404  VmaVulkanFunctions m_VulkanFunctions;
8405 
8406  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
8407  uint32_t m_GlobalMemoryTypeBits;
8408 
8409 #if VMA_RECORDING_ENABLED
8410  VmaRecorder* m_pRecorder;
8411 #endif
8412 
8413  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
8414 
8415 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8416  void ImportVulkanFunctions_Static();
8417 #endif
8418 
8419  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
8420 
8421 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
8422  void ImportVulkanFunctions_Dynamic();
8423 #endif
8424 
8425  void ValidateVulkanFunctions();
8426 
8427  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
8428 
8429  VkResult AllocateMemoryOfType(
8430  VkDeviceSize size,
8431  VkDeviceSize alignment,
8432  bool dedicatedAllocation,
8433  VkBuffer dedicatedBuffer,
8434  VkBufferUsageFlags dedicatedBufferUsage,
8435  VkImage dedicatedImage,
8436  const VmaAllocationCreateInfo& createInfo,
8437  uint32_t memTypeIndex,
8438  VmaSuballocationType suballocType,
8439  size_t allocationCount,
8440  VmaAllocation* pAllocations);
8441 
8442  // Helper function only to be used inside AllocateDedicatedMemory.
8443  VkResult AllocateDedicatedMemoryPage(
8444  VkDeviceSize size,
8445  VmaSuballocationType suballocType,
8446  uint32_t memTypeIndex,
8447  const VkMemoryAllocateInfo& allocInfo,
8448  bool map,
8449  bool isUserDataString,
8450  void* pUserData,
8451  VmaAllocation* pAllocation);
8452 
8453  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
8454  VkResult AllocateDedicatedMemory(
8455  VkDeviceSize size,
8456  VmaSuballocationType suballocType,
8457  uint32_t memTypeIndex,
8458  bool withinBudget,
8459  bool map,
8460  bool isUserDataString,
8461  void* pUserData,
8462  float priority,
8463  VkBuffer dedicatedBuffer,
8464  VkBufferUsageFlags dedicatedBufferUsage,
8465  VkImage dedicatedImage,
8466  size_t allocationCount,
8467  VmaAllocation* pAllocations);
8468 
8469  void FreeDedicatedMemory(const VmaAllocation allocation);
8470 
8471  /*
8472  Calculates and returns a bit mask of memory types that can support defragmentation
8473  on the GPU, as they support creation of the buffer required for copy operations.
8474  */
8475  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
8476 
8477  uint32_t CalculateGlobalMemoryTypeBits() const;
8478 
8479  bool GetFlushOrInvalidateRange(
8480  VmaAllocation allocation,
8481  VkDeviceSize offset, VkDeviceSize size,
8482  VkMappedMemoryRange& outRange) const;
8483 
8484 #if VMA_MEMORY_BUDGET
8485  void UpdateVulkanBudget();
8486 #endif // #if VMA_MEMORY_BUDGET
8487 };
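/*
A sketch of the range adjustment GetFlushOrInvalidateRange() has to perform for
non-coherent memory (based on the Vulkan requirement that VkMappedMemoryRange
offset and size be multiples of nonCoherentAtomSize). offset, size and outRange
refer to the parameters declared above; hMemory and allocationOffset are
hypothetical stand-ins for the allocation's memory handle and its offset within
the block; clamping to the allocation end is omitted for brevity.

    const VkDeviceSize atom =
        m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;

    // Expand [offset, offset + size) to atom boundaries.
    const VkDeviceSize alignedOffset = offset / atom * atom;                     // round down
    const VkDeviceSize alignedEnd    = (offset + size + atom - 1) / atom * atom; // round up

    outRange.sType  = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    outRange.pNext  = VMA_NULL;
    outRange.memory = hMemory;
    outRange.offset = allocationOffset + alignedOffset;
    outRange.size   = alignedEnd - alignedOffset;

Aligning the allocation-relative offset suffices because GetMemoryTypeMinAlignment()
above already aligns every allocation in a non-coherent memory type to
nonCoherentAtomSize; for HOST_COHERENT types (see IsMemoryTypeNonCoherent())
no range is produced and the flush/invalidate becomes a no-op.
*/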
8488 
8490 // Memory allocation #2 after VmaAllocator_T definition
8491 
8492 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
8493 {
8494  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
8495 }
8496 
8497 static void VmaFree(VmaAllocator hAllocator, void* ptr)
8498 {
8499  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
8500 }
8501 
8502 template<typename T>
8503 static T* VmaAllocate(VmaAllocator hAllocator)
8504 {
8505  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
8506 }
8507 
8508 template<typename T>
8509 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
8510 {
8511  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
8512 }
8513 
8514 template<typename T>
8515 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8516 {
8517  if(ptr != VMA_NULL)
8518  {
8519  ptr->~T();
8520  VmaFree(hAllocator, ptr);
8521  }
8522 }
8523 
8524 template<typename T>
8525 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
8526 {
8527  if(ptr != VMA_NULL)
8528  {
8529  for(size_t i = count; i--; )
8530  ptr[i].~T();
8531  VmaFree(hAllocator, ptr);
8532  }
8533 }
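/*
VmaAllocate/VmaAllocateArray only obtain raw storage through the user-supplied
VkAllocationCallbacks; object lifetime is handled separately with placement new
and the explicit destructor calls in vma_delete/vma_delete_array. A hedged
usage sketch (VmaStringBuilder, defined just below, takes the allocator in its
constructor):

    // Construct: raw memory from the callbacks, then placement-new (requires <new>).
    VmaStringBuilder* sb =
        new(VmaAllocate<VmaStringBuilder>(hAllocator)) VmaStringBuilder(hAllocator);

    // Destroy: runs ~VmaStringBuilder() and returns the memory via VmaFree().
    vma_delete(hAllocator, sb);
*/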
8534 
8536 // VmaStringBuilder
8537 
8538 #if VMA_STATS_STRING_ENABLED
8539 
8540 class VmaStringBuilder
8541 {
8542 public:
8543  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8544  size_t GetLength() const { return m_Data.size(); }
8545  const char* GetData() const { return m_Data.data(); }
8546 
8547  void Add(char ch) { m_Data.push_back(ch); }
8548  void Add(const char* pStr);
8549  void AddNewLine() { Add('\n'); }
8550  void AddNumber(uint32_t num);
8551  void AddNumber(uint64_t num);
8552  void AddPointer(const void* ptr);
8553 
8554 private:
8555  VmaVector< char, VmaStlAllocator<char> > m_Data;
8556 };
8557 
8558 void VmaStringBuilder::Add(const char* pStr)
8559 {
8560  const size_t strLen = strlen(pStr);
8561  if(strLen > 0)
8562  {
8563  const size_t oldCount = m_Data.size();
8564  m_Data.resize(oldCount + strLen);
8565  memcpy(m_Data.data() + oldCount, pStr, strLen);
8566  }
8567 }
8568 
8569 void VmaStringBuilder::AddNumber(uint32_t num)
8570 {
8571  char buf[11];
8572  buf[10] = '\0';
8573  char *p = &buf[10];
8574  do
8575  {
8576  *--p = '0' + (num % 10);
8577  num /= 10;
8578  }
8579  while(num);
8580  Add(p);
8581 }
8582 
8583 void VmaStringBuilder::AddNumber(uint64_t num)
8584 {
8585  char buf[21];
8586  buf[20] = '\0';
8587  char *p = &buf[20];
8588  do
8589  {
8590  *--p = '0' + (num % 10);
8591  num /= 10;
8592  }
8593  while(num);
8594  Add(p);
8595 }
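// Both AddNumber() overloads above use the same trick: digits are written
// least-significant first into the tail of a fixed buffer, so no reversal
// pass is needed. A uint64_t needs at most 20 decimal digits (UINT64_MAX is
// 18446744073709551615), hence buf[21] with one byte for the terminator.
// A standalone sketch of the same technique, with a hypothetical name:
/*
#include <cstdint>

static const char* ExampleU64ToStr(uint64_t num, char (&buf)[21])
{
    char* p = &buf[20];
    *p = '\0';
    do
    {
        *--p = '0' + (num % 10); // Emit digits right to left.
        num /= 10;
    } while(num);
    return p; // Points into buf; valid only while buf is alive.
}
*/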
8596 
8597 void VmaStringBuilder::AddPointer(const void* ptr)
8598 {
8599  char buf[21];
8600  VmaPtrToStr(buf, sizeof(buf), ptr);
8601  Add(buf);
8602 }
8603 
8604 #endif // #if VMA_STATS_STRING_ENABLED
8605 
8606 ////////////////////////////////////////////////////////////////////////////////
8607 // VmaJsonWriter
8608 
8609 #if VMA_STATS_STRING_ENABLED
8610 
8611 class VmaJsonWriter
8612 {
8613  VMA_CLASS_NO_COPY(VmaJsonWriter)
8614 public:
8615  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8616  ~VmaJsonWriter();
8617 
8618  void BeginObject(bool singleLine = false);
8619  void EndObject();
8620 
8621  void BeginArray(bool singleLine = false);
8622  void EndArray();
8623 
8624  void WriteString(const char* pStr);
8625  void BeginString(const char* pStr = VMA_NULL);
8626  void ContinueString(const char* pStr);
8627  void ContinueString(uint32_t n);
8628  void ContinueString(uint64_t n);
8629  void ContinueString_Pointer(const void* ptr);
8630  void EndString(const char* pStr = VMA_NULL);
8631 
8632  void WriteNumber(uint32_t n);
8633  void WriteNumber(uint64_t n);
8634  void WriteBool(bool b);
8635  void WriteNull();
8636 
8637 private:
8638  static const char* const INDENT;
8639 
8640  enum COLLECTION_TYPE
8641  {
8642  COLLECTION_TYPE_OBJECT,
8643  COLLECTION_TYPE_ARRAY,
8644  };
8645  struct StackItem
8646  {
8647  COLLECTION_TYPE type;
8648  uint32_t valueCount;
8649  bool singleLineMode;
8650  };
8651 
8652  VmaStringBuilder& m_SB;
8653  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8654  bool m_InsideString;
8655 
8656  void BeginValue(bool isString);
8657  void WriteIndent(bool oneLess = false);
8658 };
8659 
8660 const char* const VmaJsonWriter::INDENT = " ";
8661 
8662 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8663  m_SB(sb),
8664  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8665  m_InsideString(false)
8666 {
8667 }
8668 
8669 VmaJsonWriter::~VmaJsonWriter()
8670 {
8671  VMA_ASSERT(!m_InsideString);
8672  VMA_ASSERT(m_Stack.empty());
8673 }
8674 
8675 void VmaJsonWriter::BeginObject(bool singleLine)
8676 {
8677  VMA_ASSERT(!m_InsideString);
8678 
8679  BeginValue(false);
8680  m_SB.Add('{');
8681 
8682  StackItem item;
8683  item.type = COLLECTION_TYPE_OBJECT;
8684  item.valueCount = 0;
8685  item.singleLineMode = singleLine;
8686  m_Stack.push_back(item);
8687 }
8688 
8689 void VmaJsonWriter::EndObject()
8690 {
8691  VMA_ASSERT(!m_InsideString);
8692 
8693  WriteIndent(true);
8694  m_SB.Add('}');
8695 
8696  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8697  m_Stack.pop_back();
8698 }
8699 
8700 void VmaJsonWriter::BeginArray(bool singleLine)
8701 {
8702  VMA_ASSERT(!m_InsideString);
8703 
8704  BeginValue(false);
8705  m_SB.Add('[');
8706 
8707  StackItem item;
8708  item.type = COLLECTION_TYPE_ARRAY;
8709  item.valueCount = 0;
8710  item.singleLineMode = singleLine;
8711  m_Stack.push_back(item);
8712 }
8713 
8714 void VmaJsonWriter::EndArray()
8715 {
8716  VMA_ASSERT(!m_InsideString);
8717 
8718  WriteIndent(true);
8719  m_SB.Add(']');
8720 
8721  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8722  m_Stack.pop_back();
8723 }
8724 
8725 void VmaJsonWriter::WriteString(const char* pStr)
8726 {
8727  BeginString(pStr);
8728  EndString();
8729 }
8730 
8731 void VmaJsonWriter::BeginString(const char* pStr)
8732 {
8733  VMA_ASSERT(!m_InsideString);
8734 
8735  BeginValue(true);
8736  m_SB.Add('"');
8737  m_InsideString = true;
8738  if(pStr != VMA_NULL && pStr[0] != '\0')
8739  {
8740  ContinueString(pStr);
8741  }
8742 }
8743 
8744 void VmaJsonWriter::ContinueString(const char* pStr)
8745 {
8746  VMA_ASSERT(m_InsideString);
8747 
8748  const size_t strLen = strlen(pStr);
8749  for(size_t i = 0; i < strLen; ++i)
8750  {
8751  char ch = pStr[i];
8752  if(ch == '\\')
8753  {
8754  m_SB.Add("\\\\");
8755  }
8756  else if(ch == '"')
8757  {
8758  m_SB.Add("\\\"");
8759  }
8760  else if(ch >= 32)
8761  {
8762  m_SB.Add(ch);
8763  }
8764  else switch(ch)
8765  {
8766  case '\b':
8767  m_SB.Add("\\b");
8768  break;
8769  case '\f':
8770  m_SB.Add("\\f");
8771  break;
8772  case '\n':
8773  m_SB.Add("\\n");
8774  break;
8775  case '\r':
8776  m_SB.Add("\\r");
8777  break;
8778  case '\t':
8779  m_SB.Add("\\t");
8780  break;
8781  default:
8782  VMA_ASSERT(0 && "Character not currently supported.");
8783  break;
8784  }
8785  }
8786 }
8787 
8788 void VmaJsonWriter::ContinueString(uint32_t n)
8789 {
8790  VMA_ASSERT(m_InsideString);
8791  m_SB.AddNumber(n);
8792 }
8793 
8794 void VmaJsonWriter::ContinueString(uint64_t n)
8795 {
8796  VMA_ASSERT(m_InsideString);
8797  m_SB.AddNumber(n);
8798 }
8799 
8800 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8801 {
8802  VMA_ASSERT(m_InsideString);
8803  m_SB.AddPointer(ptr);
8804 }
8805 
8806 void VmaJsonWriter::EndString(const char* pStr)
8807 {
8808  VMA_ASSERT(m_InsideString);
8809  if(pStr != VMA_NULL && pStr[0] != '\0')
8810  {
8811  ContinueString(pStr);
8812  }
8813  m_SB.Add('"');
8814  m_InsideString = false;
8815 }
8816 
8817 void VmaJsonWriter::WriteNumber(uint32_t n)
8818 {
8819  VMA_ASSERT(!m_InsideString);
8820  BeginValue(false);
8821  m_SB.AddNumber(n);
8822 }
8823 
8824 void VmaJsonWriter::WriteNumber(uint64_t n)
8825 {
8826  VMA_ASSERT(!m_InsideString);
8827  BeginValue(false);
8828  m_SB.AddNumber(n);
8829 }
8830 
8831 void VmaJsonWriter::WriteBool(bool b)
8832 {
8833  VMA_ASSERT(!m_InsideString);
8834  BeginValue(false);
8835  m_SB.Add(b ? "true" : "false");
8836 }
8837 
8838 void VmaJsonWriter::WriteNull()
8839 {
8840  VMA_ASSERT(!m_InsideString);
8841  BeginValue(false);
8842  m_SB.Add("null");
8843 }
8844 
8845 void VmaJsonWriter::BeginValue(bool isString)
8846 {
8847  if(!m_Stack.empty())
8848  {
8849  StackItem& currItem = m_Stack.back();
8850  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8851  currItem.valueCount % 2 == 0)
8852  {
8853  VMA_ASSERT(isString);
8854  }
8855 
8856  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8857  currItem.valueCount % 2 != 0)
8858  {
8859  m_SB.Add(": ");
8860  }
8861  else if(currItem.valueCount > 0)
8862  {
8863  m_SB.Add(", ");
8864  WriteIndent();
8865  }
8866  else
8867  {
8868  WriteIndent();
8869  }
8870  ++currItem.valueCount;
8871  }
8872 }
8873 
8874 void VmaJsonWriter::WriteIndent(bool oneLess)
8875 {
8876  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8877  {
8878  m_SB.AddNewLine();
8879 
8880  size_t count = m_Stack.size();
8881  if(count > 0 && oneLess)
8882  {
8883  --count;
8884  }
8885  for(size_t i = 0; i < count; ++i)
8886  {
8887  m_SB.Add(INDENT);
8888  }
8889  }
8890 }
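// A usage sketch for the writer above, assuming an already-created
// VmaAllocator handle `allocator`. Inside an object, string keys and values
// must alternate - that is what the isString assertion in BeginValue()
// enforces for every even-numbered value of a COLLECTION_TYPE_OBJECT.
/*
VmaStringBuilder sb(allocator);
{
    VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
    json.BeginObject();
    json.WriteString("Name");    // key (even value index - must be a string)
    json.WriteString("Example"); // value
    json.WriteString("Sizes");   // key
    json.BeginArray(true);       // single-line mode: no newlines inside
    json.WriteNumber(1u);
    json.WriteNumber(2u);
    json.WriteNumber(3u);
    json.EndArray();
    json.EndObject();
} // ~VmaJsonWriter() asserts the object/array stack is empty.
// sb now holds, up to whitespace: {"Name": "Example", "Sizes": [1, 2, 3]}
*/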
8891 
8892 #endif // #if VMA_STATS_STRING_ENABLED
8893 
8894 ////////////////////////////////////////////////////////////////////////////////
8895 
8896 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8897 {
8898  if(IsUserDataString())
8899  {
8900  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8901 
8902  FreeUserDataString(hAllocator);
8903 
8904  if(pUserData != VMA_NULL)
8905  {
8906  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8907  }
8908  }
8909  else
8910  {
8911  m_pUserData = pUserData;
8912  }
8913 }
8914 
8915 void VmaAllocation_T::ChangeBlockAllocation(
8916  VmaAllocator hAllocator,
8917  VmaDeviceMemoryBlock* block,
8918  VkDeviceSize offset)
8919 {
8920  VMA_ASSERT(block != VMA_NULL);
8921  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8922 
8923  // Move mapping reference counter from old block to new block.
8924  if(block != m_BlockAllocation.m_Block)
8925  {
8926  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8927  if(IsPersistentMap())
8928  ++mapRefCount;
8929  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8930  block->Map(hAllocator, mapRefCount, VMA_NULL);
8931  }
8932 
8933  m_BlockAllocation.m_Block = block;
8934  m_BlockAllocation.m_Offset = offset;
8935 }
8936 
8937 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8938 {
8939  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8940  m_BlockAllocation.m_Offset = newOffset;
8941 }
8942 
8943 VkDeviceSize VmaAllocation_T::GetOffset() const
8944 {
8945  switch(m_Type)
8946  {
8947  case ALLOCATION_TYPE_BLOCK:
8948  return m_BlockAllocation.m_Offset;
8949  case ALLOCATION_TYPE_DEDICATED:
8950  return 0;
8951  default:
8952  VMA_ASSERT(0);
8953  return 0;
8954  }
8955 }
8956 
8957 VkDeviceMemory VmaAllocation_T::GetMemory() const
8958 {
8959  switch(m_Type)
8960  {
8961  case ALLOCATION_TYPE_BLOCK:
8962  return m_BlockAllocation.m_Block->GetDeviceMemory();
8963  case ALLOCATION_TYPE_DEDICATED:
8964  return m_DedicatedAllocation.m_hMemory;
8965  default:
8966  VMA_ASSERT(0);
8967  return VK_NULL_HANDLE;
8968  }
8969 }
8970 
8971 void* VmaAllocation_T::GetMappedData() const
8972 {
8973  switch(m_Type)
8974  {
8975  case ALLOCATION_TYPE_BLOCK:
8976  if(m_MapCount != 0)
8977  {
8978  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8979  VMA_ASSERT(pBlockData != VMA_NULL);
8980  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8981  }
8982  else
8983  {
8984  return VMA_NULL;
8985  }
8986  break;
8987  case ALLOCATION_TYPE_DEDICATED:
8988  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8989  return m_DedicatedAllocation.m_pMappedData;
8990  default:
8991  VMA_ASSERT(0);
8992  return VMA_NULL;
8993  }
8994 }
8995 
8996 bool VmaAllocation_T::CanBecomeLost() const
8997 {
8998  switch(m_Type)
8999  {
9000  case ALLOCATION_TYPE_BLOCK:
9001  return m_BlockAllocation.m_CanBecomeLost;
9002  case ALLOCATION_TYPE_DEDICATED:
9003  return false;
9004  default:
9005  VMA_ASSERT(0);
9006  return false;
9007  }
9008 }
9009 
9010 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9011 {
9012  VMA_ASSERT(CanBecomeLost());
9013 
9014  /*
9015  Warning: This is a carefully designed algorithm.
9016  Do not modify unless you really know what you're doing :)
9017  */
9018  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
9019  for(;;)
9020  {
9021  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
9022  {
9023  VMA_ASSERT(0);
9024  return false;
9025  }
9026  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
9027  {
9028  return false;
9029  }
9030  else // Last use time earlier than current time.
9031  {
9032  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
9033  {
9034  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
9035  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
9036  return true;
9037  }
9038  }
9039  }
9040 }
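// The loop above is a standard lock-free compare-and-swap retry loop. A
// reduced standalone sketch with std::atomic (the header uses its own
// VMA_ATOMIC_UINT32 wrapper) and a hypothetical LOST sentinel; unlike the
// real method, it simply returns false when the value is already lost:
/*
#include <atomic>
#include <cstdint>

static const uint32_t EXAMPLE_FRAME_INDEX_LOST = UINT32_MAX;

static bool ExampleMakeLost(std::atomic<uint32_t>& lastUseFrameIndex,
    uint32_t currentFrameIndex, uint32_t frameInUseCount)
{
    uint32_t localLastUse = lastUseFrameIndex.load();
    for(;;)
    {
        if(localLastUse == EXAMPLE_FRAME_INDEX_LOST)
            return false; // Already lost.
        if(localLastUse + frameInUseCount >= currentFrameIndex)
            return false; // Potentially still in use - cannot be made lost.
        // Try to publish LOST. On failure another thread changed the value;
        // compare_exchange_weak reloads localLastUse and we re-check.
        if(lastUseFrameIndex.compare_exchange_weak(localLastUse, EXAMPLE_FRAME_INDEX_LOST))
            return true;
    }
}
*/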
9041 
9042 #if VMA_STATS_STRING_ENABLED
9043 
9044 // Corresponds to values of enum VmaSuballocationType.
9045 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
9046  "FREE",
9047  "UNKNOWN",
9048  "BUFFER",
9049  "IMAGE_UNKNOWN",
9050  "IMAGE_LINEAR",
9051  "IMAGE_OPTIMAL",
9052 };
9053 
9054 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
9055 {
9056  json.WriteString("Type");
9057  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
9058 
9059  json.WriteString("Size");
9060  json.WriteNumber(m_Size);
9061 
9062  if(m_pUserData != VMA_NULL)
9063  {
9064  json.WriteString("UserData");
9065  if(IsUserDataString())
9066  {
9067  json.WriteString((const char*)m_pUserData);
9068  }
9069  else
9070  {
9071  json.BeginString();
9072  json.ContinueString_Pointer(m_pUserData);
9073  json.EndString();
9074  }
9075  }
9076 
9077  json.WriteString("CreationFrameIndex");
9078  json.WriteNumber(m_CreationFrameIndex);
9079 
9080  json.WriteString("LastUseFrameIndex");
9081  json.WriteNumber(GetLastUseFrameIndex());
9082 
9083  if(m_BufferImageUsage != 0)
9084  {
9085  json.WriteString("Usage");
9086  json.WriteNumber(m_BufferImageUsage);
9087  }
9088 }
9089 
9090 #endif
9091 
9092 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
9093 {
9094  VMA_ASSERT(IsUserDataString());
9095  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
9096  m_pUserData = VMA_NULL;
9097 }
9098 
9099 void VmaAllocation_T::BlockAllocMap()
9100 {
9101  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
9102 
9103  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
9104  {
9105  ++m_MapCount;
9106  }
9107  else
9108  {
9109  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
9110  }
9111 }
9112 
9113 void VmaAllocation_T::BlockAllocUnmap()
9114 {
9115  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
9116 
9117  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
9118  {
9119  --m_MapCount;
9120  }
9121  else
9122  {
9123  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
9124  }
9125 }
9126 
9127 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
9128 {
9129  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
9130 
9131  if(m_MapCount != 0)
9132  {
9133  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
9134  {
9135  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
9136  *ppData = m_DedicatedAllocation.m_pMappedData;
9137  ++m_MapCount;
9138  return VK_SUCCESS;
9139  }
9140  else
9141  {
9142  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
9143  return VK_ERROR_MEMORY_MAP_FAILED;
9144  }
9145  }
9146  else
9147  {
9148  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
9149  hAllocator->m_hDevice,
9150  m_DedicatedAllocation.m_hMemory,
9151  0, // offset
9152  VK_WHOLE_SIZE,
9153  0, // flags
9154  ppData);
9155  if(result == VK_SUCCESS)
9156  {
9157  m_DedicatedAllocation.m_pMappedData = *ppData;
9158  m_MapCount = 1;
9159  }
9160  return result;
9161  }
9162 }
9163 
9164 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
9165 {
9166  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
9167 
9168  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
9169  {
9170  --m_MapCount;
9171  if(m_MapCount == 0)
9172  {
9173  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
9174  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
9175  hAllocator->m_hDevice,
9176  m_DedicatedAllocation.m_hMemory);
9177  }
9178  }
9179  else
9180  {
9181  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
9182  }
9183 }
9184 
9185 #if VMA_STATS_STRING_ENABLED
9186 
9187 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
9188 {
9189  json.BeginObject();
9190 
9191  json.WriteString("Blocks");
9192  json.WriteNumber(stat.blockCount);
9193 
9194  json.WriteString("Allocations");
9195  json.WriteNumber(stat.allocationCount);
9196 
9197  json.WriteString("UnusedRanges");
9198  json.WriteNumber(stat.unusedRangeCount);
9199 
9200  json.WriteString("UsedBytes");
9201  json.WriteNumber(stat.usedBytes);
9202 
9203  json.WriteString("UnusedBytes");
9204  json.WriteNumber(stat.unusedBytes);
9205 
9206  if(stat.allocationCount > 1)
9207  {
9208  json.WriteString("AllocationSize");
9209  json.BeginObject(true);
9210  json.WriteString("Min");
9211  json.WriteNumber(stat.allocationSizeMin);
9212  json.WriteString("Avg");
9213  json.WriteNumber(stat.allocationSizeAvg);
9214  json.WriteString("Max");
9215  json.WriteNumber(stat.allocationSizeMax);
9216  json.EndObject();
9217  }
9218 
9219  if(stat.unusedRangeCount > 1)
9220  {
9221  json.WriteString("UnusedRangeSize");
9222  json.BeginObject(true);
9223  json.WriteString("Min");
9224  json.WriteNumber(stat.unusedRangeSizeMin);
9225  json.WriteString("Avg");
9226  json.WriteNumber(stat.unusedRangeSizeAvg);
9227  json.WriteString("Max");
9228  json.WriteNumber(stat.unusedRangeSizeMax);
9229  json.EndObject();
9230  }
9231 
9232  json.EndObject();
9233 }
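// For illustration only (placeholder numbers): given a VmaStatInfo with 42
// allocations and 3 unused ranges, the function above emits an object shaped
// like this - the two "...Size" sub-objects appear only when the respective
// count is greater than 1:
/*
{
  "Blocks": 1,
  "Allocations": 42,
  "UnusedRanges": 3,
  "UsedBytes": 1048576,
  "UnusedBytes": 65536,
  "AllocationSize": { "Min": 256, "Avg": 24966, "Max": 131072 },
  "UnusedRangeSize": { "Min": 4096, "Avg": 21845, "Max": 32768 }
}
*/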
9234 
9235 #endif // #if VMA_STATS_STRING_ENABLED
9236 
9237 struct VmaSuballocationItemSizeLess
9238 {
9239  bool operator()(
9240  const VmaSuballocationList::iterator lhs,
9241  const VmaSuballocationList::iterator rhs) const
9242  {
9243  return lhs->size < rhs->size;
9244  }
9245  bool operator()(
9246  const VmaSuballocationList::iterator lhs,
9247  VkDeviceSize rhsSize) const
9248  {
9249  return lhs->size < rhsSize;
9250  }
9251 };
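// The second operator() above is what makes the comparator heterogeneous: it
// compares a list iterator directly against a bare VkDeviceSize, so
// m_FreeSuballocationsBySize can be binary-searched by requested size without
// building a dummy suballocation. A standalone sketch of the same idea with
// std::lower_bound and hypothetical names:
/*
#include <algorithm>
#include <cstdint>
#include <vector>

struct ExampleSizeLess
{
    bool operator()(uint64_t lhs, uint64_t rhs) const { return lhs < rhs; }
};

// Index of the first element with size >= requested, analogous to
// VmaBinaryFindFirstNotLess over m_FreeSuballocationsBySize.
static size_t ExampleFindFirstNotLess(
    const std::vector<uint64_t>& sizesAscending, uint64_t requested)
{
    return std::lower_bound(sizesAscending.begin(), sizesAscending.end(),
        requested, ExampleSizeLess()) - sizesAscending.begin();
}
*/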
9252 
9253 
9254 ////////////////////////////////////////////////////////////////////////////////
9255 // class VmaBlockMetadata
9256 
9257 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
9258  m_Size(0),
9259  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
9260 {
9261 }
9262 
9263 #if VMA_STATS_STRING_ENABLED
9264 
9265 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
9266  VkDeviceSize unusedBytes,
9267  size_t allocationCount,
9268  size_t unusedRangeCount) const
9269 {
9270  json.BeginObject();
9271 
9272  json.WriteString("TotalBytes");
9273  json.WriteNumber(GetSize());
9274 
9275  json.WriteString("UnusedBytes");
9276  json.WriteNumber(unusedBytes);
9277 
9278  json.WriteString("Allocations");
9279  json.WriteNumber((uint64_t)allocationCount);
9280 
9281  json.WriteString("UnusedRanges");
9282  json.WriteNumber((uint64_t)unusedRangeCount);
9283 
9284  json.WriteString("Suballocations");
9285  json.BeginArray();
9286 }
9287 
9288 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
9289  VkDeviceSize offset,
9290  VmaAllocation hAllocation) const
9291 {
9292  json.BeginObject(true);
9293 
9294  json.WriteString("Offset");
9295  json.WriteNumber(offset);
9296 
9297  hAllocation->PrintParameters(json);
9298 
9299  json.EndObject();
9300 }
9301 
9302 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
9303  VkDeviceSize offset,
9304  VkDeviceSize size) const
9305 {
9306  json.BeginObject(true);
9307 
9308  json.WriteString("Offset");
9309  json.WriteNumber(offset);
9310 
9311  json.WriteString("Type");
9312  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
9313 
9314  json.WriteString("Size");
9315  json.WriteNumber(size);
9316 
9317  json.EndObject();
9318 }
9319 
9320 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
9321 {
9322  json.EndArray();
9323  json.EndObject();
9324 }
9325 
9326 #endif // #if VMA_STATS_STRING_ENABLED
9327 
9328 ////////////////////////////////////////////////////////////////////////////////
9329 // class VmaBlockMetadata_Generic
9330 
9331 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
9332  VmaBlockMetadata(hAllocator),
9333  m_FreeCount(0),
9334  m_SumFreeSize(0),
9335  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9336  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
9337 {
9338 }
9339 
9340 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
9341 {
9342 }
9343 
9344 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
9345 {
9346  VmaBlockMetadata::Init(size);
9347 
9348  m_FreeCount = 1;
9349  m_SumFreeSize = size;
9350 
9351  VmaSuballocation suballoc = {};
9352  suballoc.offset = 0;
9353  suballoc.size = size;
9354  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9355  suballoc.hAllocation = VK_NULL_HANDLE;
9356 
9357  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9358  m_Suballocations.push_back(suballoc);
9359  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
9360  --suballocItem;
9361  m_FreeSuballocationsBySize.push_back(suballocItem);
9362 }
9363 
9364 bool VmaBlockMetadata_Generic::Validate() const
9365 {
9366  VMA_VALIDATE(!m_Suballocations.empty());
9367 
9368  // Expected offset of new suballocation as calculated from previous ones.
9369  VkDeviceSize calculatedOffset = 0;
9370  // Expected number of free suballocations as calculated from traversing their list.
9371  uint32_t calculatedFreeCount = 0;
9372  // Expected sum size of free suballocations as calculated from traversing their list.
9373  VkDeviceSize calculatedSumFreeSize = 0;
9374  // Expected number of free suballocations that should be registered in
9375  // m_FreeSuballocationsBySize calculated from traversing their list.
9376  size_t freeSuballocationsToRegister = 0;
9377  // True if previous visited suballocation was free.
9378  bool prevFree = false;
9379 
9380  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9381  suballocItem != m_Suballocations.cend();
9382  ++suballocItem)
9383  {
9384  const VmaSuballocation& subAlloc = *suballocItem;
9385 
9386  // Actual offset of this suballocation doesn't match the expected one.
9387  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
9388 
9389  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
9390  // Two adjacent free suballocations are invalid. They should be merged.
9391  VMA_VALIDATE(!prevFree || !currFree);
9392 
9393  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
9394 
9395  if(currFree)
9396  {
9397  calculatedSumFreeSize += subAlloc.size;
9398  ++calculatedFreeCount;
9399  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9400  {
9401  ++freeSuballocationsToRegister;
9402  }
9403 
9404  // Margin required between allocations - every free space must be at least that large.
9405  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
9406  }
9407  else
9408  {
9409  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
9410  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
9411 
9412  // Margin required between allocations - previous allocation must be free.
9413  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
9414  }
9415 
9416  calculatedOffset += subAlloc.size;
9417  prevFree = currFree;
9418  }
9419 
9420  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
9421  // match the expected one.
9422  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
9423 
9424  VkDeviceSize lastSize = 0;
9425  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
9426  {
9427  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
9428 
9429  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
9430  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9431  // They must be sorted by size ascending.
9432  VMA_VALIDATE(suballocItem->size >= lastSize);
9433 
9434  lastSize = suballocItem->size;
9435  }
9436 
9437  // Check if totals match calculated values.
9438  VMA_VALIDATE(ValidateFreeSuballocationList());
9439  VMA_VALIDATE(calculatedOffset == GetSize());
9440  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
9441  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
9442 
9443  return true;
9444 }
9445 
9446 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
9447 {
9448  if(!m_FreeSuballocationsBySize.empty())
9449  {
9450  return m_FreeSuballocationsBySize.back()->size;
9451  }
9452  else
9453  {
9454  return 0;
9455  }
9456 }
9457 
9458 bool VmaBlockMetadata_Generic::IsEmpty() const
9459 {
9460  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
9461 }
9462 
9463 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9464 {
9465  outInfo.blockCount = 1;
9466 
9467  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9468  outInfo.allocationCount = rangeCount - m_FreeCount;
9469  outInfo.unusedRangeCount = m_FreeCount;
9470 
9471  outInfo.unusedBytes = m_SumFreeSize;
9472  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
9473 
9474  outInfo.allocationSizeMin = UINT64_MAX;
9475  outInfo.allocationSizeMax = 0;
9476  outInfo.unusedRangeSizeMin = UINT64_MAX;
9477  outInfo.unusedRangeSizeMax = 0;
9478 
9479  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9480  suballocItem != m_Suballocations.cend();
9481  ++suballocItem)
9482  {
9483  const VmaSuballocation& suballoc = *suballocItem;
9484  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9485  {
9486  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9487  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9488  }
9489  else
9490  {
9491  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
9492  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
9493  }
9494  }
9495 }
9496 
9497 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
9498 {
9499  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9500 
9501  inoutStats.size += GetSize();
9502  inoutStats.unusedSize += m_SumFreeSize;
9503  inoutStats.allocationCount += rangeCount - m_FreeCount;
9504  inoutStats.unusedRangeCount += m_FreeCount;
9505  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9506 }
9507 
9508 #if VMA_STATS_STRING_ENABLED
9509 
9510 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
9511 {
9512  PrintDetailedMap_Begin(json,
9513  m_SumFreeSize, // unusedBytes
9514  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
9515  m_FreeCount); // unusedRangeCount
9516 
9517  size_t i = 0;
9518  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9519  suballocItem != m_Suballocations.cend();
9520  ++suballocItem, ++i)
9521  {
9522  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9523  {
9524  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
9525  }
9526  else
9527  {
9528  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
9529  }
9530  }
9531 
9532  PrintDetailedMap_End(json);
9533 }
9534 
9535 #endif // #if VMA_STATS_STRING_ENABLED
9536 
9537 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9538  uint32_t currentFrameIndex,
9539  uint32_t frameInUseCount,
9540  VkDeviceSize bufferImageGranularity,
9541  VkDeviceSize allocSize,
9542  VkDeviceSize allocAlignment,
9543  bool upperAddress,
9544  VmaSuballocationType allocType,
9545  bool canMakeOtherLost,
9546  uint32_t strategy,
9547  VmaAllocationRequest* pAllocationRequest)
9548 {
9549  VMA_ASSERT(allocSize > 0);
9550  VMA_ASSERT(!upperAddress);
9551  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9552  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9553  VMA_HEAVY_ASSERT(Validate());
9554 
9555  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9556 
9557  // There is not enough total free space in this block to fulfill the request: Early return.
9558  if(canMakeOtherLost == false &&
9559  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9560  {
9561  return false;
9562  }
9563 
9564  // New algorithm, efficiently searching freeSuballocationsBySize.
9565  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9566  if(freeSuballocCount > 0)
9567  {
9568  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
9569  {
9570  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9571  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9572  m_FreeSuballocationsBySize.data(),
9573  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9574  allocSize + 2 * VMA_DEBUG_MARGIN,
9575  VmaSuballocationItemSizeLess());
9576  size_t index = it - m_FreeSuballocationsBySize.data();
9577  for(; index < freeSuballocCount; ++index)
9578  {
9579  if(CheckAllocation(
9580  currentFrameIndex,
9581  frameInUseCount,
9582  bufferImageGranularity,
9583  allocSize,
9584  allocAlignment,
9585  allocType,
9586  m_FreeSuballocationsBySize[index],
9587  false, // canMakeOtherLost
9588  &pAllocationRequest->offset,
9589  &pAllocationRequest->itemsToMakeLostCount,
9590  &pAllocationRequest->sumFreeSize,
9591  &pAllocationRequest->sumItemSize))
9592  {
9593  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9594  return true;
9595  }
9596  }
9597  }
9598  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9599  {
9600  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9601  it != m_Suballocations.end();
9602  ++it)
9603  {
9604  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9605  currentFrameIndex,
9606  frameInUseCount,
9607  bufferImageGranularity,
9608  allocSize,
9609  allocAlignment,
9610  allocType,
9611  it,
9612  false, // canMakeOtherLost
9613  &pAllocationRequest->offset,
9614  &pAllocationRequest->itemsToMakeLostCount,
9615  &pAllocationRequest->sumFreeSize,
9616  &pAllocationRequest->sumItemSize))
9617  {
9618  pAllocationRequest->item = it;
9619  return true;
9620  }
9621  }
9622  }
9623  else // WORST_FIT, FIRST_FIT
9624  {
9625  // Search starting from biggest suballocations.
9626  for(size_t index = freeSuballocCount; index--; )
9627  {
9628  if(CheckAllocation(
9629  currentFrameIndex,
9630  frameInUseCount,
9631  bufferImageGranularity,
9632  allocSize,
9633  allocAlignment,
9634  allocType,
9635  m_FreeSuballocationsBySize[index],
9636  false, // canMakeOtherLost
9637  &pAllocationRequest->offset,
9638  &pAllocationRequest->itemsToMakeLostCount,
9639  &pAllocationRequest->sumFreeSize,
9640  &pAllocationRequest->sumItemSize))
9641  {
9642  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9643  return true;
9644  }
9645  }
9646  }
9647  }
9648 
9649  if(canMakeOtherLost)
9650  {
9651  // Brute-force algorithm. TODO: Come up with something better.
9652 
9653  bool found = false;
9654  VmaAllocationRequest tmpAllocRequest = {};
9655  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9656  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9657  suballocIt != m_Suballocations.end();
9658  ++suballocIt)
9659  {
9660  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9661  suballocIt->hAllocation->CanBecomeLost())
9662  {
9663  if(CheckAllocation(
9664  currentFrameIndex,
9665  frameInUseCount,
9666  bufferImageGranularity,
9667  allocSize,
9668  allocAlignment,
9669  allocType,
9670  suballocIt,
9671  canMakeOtherLost,
9672  &tmpAllocRequest.offset,
9673  &tmpAllocRequest.itemsToMakeLostCount,
9674  &tmpAllocRequest.sumFreeSize,
9675  &tmpAllocRequest.sumItemSize))
9676  {
9677  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
9678  {
9679  *pAllocationRequest = tmpAllocRequest;
9680  pAllocationRequest->item = suballocIt;
9681  break;
9682  }
9683  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9684  {
9685  *pAllocationRequest = tmpAllocRequest;
9686  pAllocationRequest->item = suballocIt;
9687  found = true;
9688  }
9689  }
9690  }
9691  }
9692 
9693  return found;
9694  }
9695 
9696  return false;
9697 }
9698 
9699 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9700  uint32_t currentFrameIndex,
9701  uint32_t frameInUseCount,
9702  VmaAllocationRequest* pAllocationRequest)
9703 {
9704  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9705 
9706  while(pAllocationRequest->itemsToMakeLostCount > 0)
9707  {
9708  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9709  {
9710  ++pAllocationRequest->item;
9711  }
9712  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9713  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9714  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9715  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9716  {
9717  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9718  --pAllocationRequest->itemsToMakeLostCount;
9719  }
9720  else
9721  {
9722  return false;
9723  }
9724  }
9725 
9726  VMA_HEAVY_ASSERT(Validate());
9727  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9728  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9729 
9730  return true;
9731 }
9732 
9733 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9734 {
9735  uint32_t lostAllocationCount = 0;
9736  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9737  it != m_Suballocations.end();
9738  ++it)
9739  {
9740  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9741  it->hAllocation->CanBecomeLost() &&
9742  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9743  {
9744  it = FreeSuballocation(it);
9745  ++lostAllocationCount;
9746  }
9747  }
9748  return lostAllocationCount;
9749 }
9750 
9751 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9752 {
9753  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9754  it != m_Suballocations.end();
9755  ++it)
9756  {
9757  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
9758  {
9759  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
9760  {
9761  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9762  return VK_ERROR_VALIDATION_FAILED_EXT;
9763  }
9764  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
9765  {
9766  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9767  return VK_ERROR_VALIDATION_FAILED_EXT;
9768  }
9769  }
9770  }
9771 
9772  return VK_SUCCESS;
9773 }
9774 
9775 void VmaBlockMetadata_Generic::Alloc(
9776  const VmaAllocationRequest& request,
9777  VmaSuballocationType type,
9778  VkDeviceSize allocSize,
9779  VmaAllocation hAllocation)
9780 {
9781  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9782  VMA_ASSERT(request.item != m_Suballocations.end());
9783  VmaSuballocation& suballoc = *request.item;
9784  // Given suballocation is a free block.
9785  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9786  // Given offset is inside this suballocation.
9787  VMA_ASSERT(request.offset >= suballoc.offset);
9788  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9789  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9790  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9791 
9792  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9793  // it to become used.
9794  UnregisterFreeSuballocation(request.item);
9795 
9796  suballoc.offset = request.offset;
9797  suballoc.size = allocSize;
9798  suballoc.type = type;
9799  suballoc.hAllocation = hAllocation;
9800 
9801  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9802  if(paddingEnd)
9803  {
9804  VmaSuballocation paddingSuballoc = {};
9805  paddingSuballoc.offset = request.offset + allocSize;
9806  paddingSuballoc.size = paddingEnd;
9807  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9808  VmaSuballocationList::iterator next = request.item;
9809  ++next;
9810  const VmaSuballocationList::iterator paddingEndItem =
9811  m_Suballocations.insert(next, paddingSuballoc);
9812  RegisterFreeSuballocation(paddingEndItem);
9813  }
9814 
9815  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9816  if(paddingBegin)
9817  {
9818  VmaSuballocation paddingSuballoc = {};
9819  paddingSuballoc.offset = request.offset - paddingBegin;
9820  paddingSuballoc.size = paddingBegin;
9821  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9822  const VmaSuballocationList::iterator paddingBeginItem =
9823  m_Suballocations.insert(request.item, paddingSuballoc);
9824  RegisterFreeSuballocation(paddingBeginItem);
9825  }
9826 
9827  // Update totals.
9828  m_FreeCount = m_FreeCount - 1;
9829  if(paddingBegin > 0)
9830  {
9831  ++m_FreeCount;
9832  }
9833  if(paddingEnd > 0)
9834  {
9835  ++m_FreeCount;
9836  }
9837  m_SumFreeSize -= allocSize;
9838 }
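// Worked example with made-up numbers: if the chosen free suballocation
// spans [0, 1000) and the request resolved to offset 256 with allocSize 512,
// then paddingBegin = 256 and paddingEnd = 1000 - 256 - 512 = 232. Alloc()
// shrinks the item to the used range [256, 768), inserts free items
// [768, 1000) after it and [0, 256) before it, and m_FreeCount changes by
// -1 (consumed item) +1 +1 (two non-zero paddings) = net +1.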
9839 
9840 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9841 {
9842  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9843  suballocItem != m_Suballocations.end();
9844  ++suballocItem)
9845  {
9846  VmaSuballocation& suballoc = *suballocItem;
9847  if(suballoc.hAllocation == allocation)
9848  {
9849  FreeSuballocation(suballocItem);
9850  VMA_HEAVY_ASSERT(Validate());
9851  return;
9852  }
9853  }
9854  VMA_ASSERT(0 && "Not found!");
9855 }
9856 
9857 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9858 {
9859  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9860  suballocItem != m_Suballocations.end();
9861  ++suballocItem)
9862  {
9863  VmaSuballocation& suballoc = *suballocItem;
9864  if(suballoc.offset == offset)
9865  {
9866  FreeSuballocation(suballocItem);
9867  return;
9868  }
9869  }
9870  VMA_ASSERT(0 && "Not found!");
9871 }
9872 
9873 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9874 {
9875  VkDeviceSize lastSize = 0;
9876  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9877  {
9878  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9879 
9880  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9881  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9882  VMA_VALIDATE(it->size >= lastSize);
9883  lastSize = it->size;
9884  }
9885  return true;
9886 }
9887 
9888 bool VmaBlockMetadata_Generic::CheckAllocation(
9889  uint32_t currentFrameIndex,
9890  uint32_t frameInUseCount,
9891  VkDeviceSize bufferImageGranularity,
9892  VkDeviceSize allocSize,
9893  VkDeviceSize allocAlignment,
9894  VmaSuballocationType allocType,
9895  VmaSuballocationList::const_iterator suballocItem,
9896  bool canMakeOtherLost,
9897  VkDeviceSize* pOffset,
9898  size_t* itemsToMakeLostCount,
9899  VkDeviceSize* pSumFreeSize,
9900  VkDeviceSize* pSumItemSize) const
9901 {
9902  VMA_ASSERT(allocSize > 0);
9903  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9904  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9905  VMA_ASSERT(pOffset != VMA_NULL);
9906 
9907  *itemsToMakeLostCount = 0;
9908  *pSumFreeSize = 0;
9909  *pSumItemSize = 0;
9910 
9911  if(canMakeOtherLost)
9912  {
9913  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9914  {
9915  *pSumFreeSize = suballocItem->size;
9916  }
9917  else
9918  {
9919  if(suballocItem->hAllocation->CanBecomeLost() &&
9920  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9921  {
9922  ++*itemsToMakeLostCount;
9923  *pSumItemSize = suballocItem->size;
9924  }
9925  else
9926  {
9927  return false;
9928  }
9929  }
9930 
9931  // Remaining size is too small for this request: Early return.
9932  if(GetSize() - suballocItem->offset < allocSize)
9933  {
9934  return false;
9935  }
9936 
9937  // Start from offset equal to beginning of this suballocation.
9938  *pOffset = suballocItem->offset;
9939 
9940  // Apply VMA_DEBUG_MARGIN at the beginning.
9941  if(VMA_DEBUG_MARGIN > 0)
9942  {
9943  *pOffset += VMA_DEBUG_MARGIN;
9944  }
9945 
9946  // Apply alignment.
9947  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9948 
9949  // Check previous suballocations for BufferImageGranularity conflicts.
9950  // Make bigger alignment if necessary.
9951  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
9952  {
9953  bool bufferImageGranularityConflict = false;
9954  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9955  while(prevSuballocItem != m_Suballocations.cbegin())
9956  {
9957  --prevSuballocItem;
9958  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9959  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9960  {
9961  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9962  {
9963  bufferImageGranularityConflict = true;
9964  break;
9965  }
9966  }
9967  else
9968  // Already on previous page.
9969  break;
9970  }
9971  if(bufferImageGranularityConflict)
9972  {
9973  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9974  }
9975  }
9976 
9977  // Now that we have final *pOffset, check if we are past suballocItem.
9978  // If yes, return false - this function should be called for another suballocItem as starting point.
9979  if(*pOffset >= suballocItem->offset + suballocItem->size)
9980  {
9981  return false;
9982  }
9983 
9984  // Calculate padding at the beginning based on current offset.
9985  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9986 
9987  // Calculate required margin at the end.
9988  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9989 
9990  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9991  // Another early return check.
9992  if(suballocItem->offset + totalSize > GetSize())
9993  {
9994  return false;
9995  }
9996 
9997  // Advance lastSuballocItem until desired size is reached.
9998  // Update itemsToMakeLostCount.
9999  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
10000  if(totalSize > suballocItem->size)
10001  {
10002  VkDeviceSize remainingSize = totalSize - suballocItem->size;
10003  while(remainingSize > 0)
10004  {
10005  ++lastSuballocItem;
10006  if(lastSuballocItem == m_Suballocations.cend())
10007  {
10008  return false;
10009  }
10010  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
10011  {
10012  *pSumFreeSize += lastSuballocItem->size;
10013  }
10014  else
10015  {
10016  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
10017  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
10018  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10019  {
10020  ++*itemsToMakeLostCount;
10021  *pSumItemSize += lastSuballocItem->size;
10022  }
10023  else
10024  {
10025  return false;
10026  }
10027  }
10028  remainingSize = (lastSuballocItem->size < remainingSize) ?
10029  remainingSize - lastSuballocItem->size : 0;
10030  }
10031  }
10032 
10033  // Check next suballocations for BufferImageGranularity conflicts.
10034  // If conflict exists, we must mark more allocations lost or fail.
10035  if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
10036  {
10037  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
10038  ++nextSuballocItem;
10039  while(nextSuballocItem != m_Suballocations.cend())
10040  {
10041  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
10042  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10043  {
10044  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10045  {
10046  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
10047  if(nextSuballoc.hAllocation->CanBecomeLost() &&
10048  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
10049  {
10050  ++*itemsToMakeLostCount;
10051  }
10052  else
10053  {
10054  return false;
10055  }
10056  }
10057  }
10058  else
10059  {
10060  // Already on next page.
10061  break;
10062  }
10063  ++nextSuballocItem;
10064  }
10065  }
10066  }
10067  else
10068  {
10069  const VmaSuballocation& suballoc = *suballocItem;
10070  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10071 
10072  *pSumFreeSize = suballoc.size;
10073 
10074  // Size of this suballocation is too small for this request: Early return.
10075  if(suballoc.size < allocSize)
10076  {
10077  return false;
10078  }
10079 
10080  // Start from offset equal to beginning of this suballocation.
10081  *pOffset = suballoc.offset;
10082 
10083  // Apply VMA_DEBUG_MARGIN at the beginning.
10084  if(VMA_DEBUG_MARGIN > 0)
10085  {
10086  *pOffset += VMA_DEBUG_MARGIN;
10087  }
10088 
10089  // Apply alignment.
10090  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
10091 
10092  // Check previous suballocations for BufferImageGranularity conflicts.
10093  // Make bigger alignment if necessary.
10094  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
10095  {
10096  bool bufferImageGranularityConflict = false;
10097  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
10098  while(prevSuballocItem != m_Suballocations.cbegin())
10099  {
10100  --prevSuballocItem;
10101  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
10102  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
10103  {
10104  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
10105  {
10106  bufferImageGranularityConflict = true;
10107  break;
10108  }
10109  }
10110  else
10111  // Already on previous page.
10112  break;
10113  }
10114  if(bufferImageGranularityConflict)
10115  {
10116  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
10117  }
10118  }
10119 
10120  // Calculate padding at the beginning based on current offset.
10121  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
10122 
10123  // Calculate required margin at the end.
10124  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
10125 
10126  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
10127  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
10128  {
10129  return false;
10130  }
10131 
10132  // Check next suballocations for BufferImageGranularity conflicts.
10133  // If conflict exists, allocation cannot be made here.
10134  if(allocSize % bufferImageGranularity || *pOffset % bufferImageGranularity)
10135  {
10136  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
10137  ++nextSuballocItem;
10138  while(nextSuballocItem != m_Suballocations.cend())
10139  {
10140  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
10141  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10142  {
10143  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
10144  {
10145  return false;
10146  }
10147  }
10148  else
10149  {
10150  // Already on next page.
10151  break;
10152  }
10153  ++nextSuballocItem;
10154  }
10155  }
10156  }
10157 
10158  // All tests passed: Success. pOffset is already filled.
10159  return true;
10160 }
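// Worked example with made-up numbers for the non-canMakeOtherLost path:
// with VMA_DEBUG_MARGIN = 16, a free suballocation [100, 1100) of size 1000,
// allocAlignment = 64 and allocSize = 512:
//   *pOffset = 100 + 16 = 116, then VmaAlignUp(116, 64) = 128;
//   paddingBegin = 128 - 100 = 28, and 28 + 512 + 16 = 556 <= 1000,
// so the request fits and the function returns true with *pOffset = 128.
// A sketch of the same rounding VmaAlignUp performs (hypothetical name):
/*
#include <cstdint>

static inline uint64_t ExampleAlignUp(uint64_t val, uint64_t alignment)
{
    return (val + alignment - 1) / alignment * alignment; // any alignment > 0
}
*/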
10161 
10162 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
10163 {
10164  VMA_ASSERT(item != m_Suballocations.end());
10165  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10166 
10167  VmaSuballocationList::iterator nextItem = item;
10168  ++nextItem;
10169  VMA_ASSERT(nextItem != m_Suballocations.end());
10170  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
10171 
10172  item->size += nextItem->size;
10173  --m_FreeCount;
10174  m_Suballocations.erase(nextItem);
10175 }
10176 
10177 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
10178 {
10179  // Change this suballocation to be marked as free.
10180  VmaSuballocation& suballoc = *suballocItem;
10181  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
10182  suballoc.hAllocation = VK_NULL_HANDLE;
10183 
10184  // Update totals.
10185  ++m_FreeCount;
10186  m_SumFreeSize += suballoc.size;
10187 
10188  // Merge with previous and/or next suballocation if it's also free.
10189  bool mergeWithNext = false;
10190  bool mergeWithPrev = false;
10191 
10192  VmaSuballocationList::iterator nextItem = suballocItem;
10193  ++nextItem;
10194  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
10195  {
10196  mergeWithNext = true;
10197  }
10198 
10199  VmaSuballocationList::iterator prevItem = suballocItem;
10200  if(suballocItem != m_Suballocations.begin())
10201  {
10202  --prevItem;
10203  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
10204  {
10205  mergeWithPrev = true;
10206  }
10207  }
10208 
10209  if(mergeWithNext)
10210  {
10211  UnregisterFreeSuballocation(nextItem);
10212  MergeFreeWithNext(suballocItem);
10213  }
10214 
10215  if(mergeWithPrev)
10216  {
10217  UnregisterFreeSuballocation(prevItem);
10218  MergeFreeWithNext(prevItem);
10219  RegisterFreeSuballocation(prevItem);
10220  return prevItem;
10221  }
10222  else
10223  {
10224  RegisterFreeSuballocation(suballocItem);
10225  return suballocItem;
10226  }
10227 }
10228 
10229 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
10230 {
10231  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10232  VMA_ASSERT(item->size > 0);
10233 
10234  // You may want to enable this validation at the beginning or at the end of
10235  // this function, depending on what you want to check.
10236  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10237 
10238  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
10239  {
10240  if(m_FreeSuballocationsBySize.empty())
10241  {
10242  m_FreeSuballocationsBySize.push_back(item);
10243  }
10244  else
10245  {
10246  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
10247  }
10248  }
10249 
10250  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10251 }
10252 
10253 
10254 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
10255 {
10256  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
10257  VMA_ASSERT(item->size > 0);
10258 
10259  // You may want to enable this validation at the beginning or at the end of
10260  // this function, depending on what you want to check.
10261  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10262 
10263  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
10264  {
10265  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
10266  m_FreeSuballocationsBySize.data(),
10267  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
10268  item,
10269  VmaSuballocationItemSizeLess());
10270  for(size_t index = it - m_FreeSuballocationsBySize.data();
10271  index < m_FreeSuballocationsBySize.size();
10272  ++index)
10273  {
10274  if(m_FreeSuballocationsBySize[index] == item)
10275  {
10276  VmaVectorRemove(m_FreeSuballocationsBySize, index);
10277  return;
10278  }
10279  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
10280  }
10281  VMA_ASSERT(0 && "Not found.");
10282  }
10283 
10284  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
10285 }
10286 
10287 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
10288  VkDeviceSize bufferImageGranularity,
10289  VmaSuballocationType& inOutPrevSuballocType) const
10290 {
10291  if(bufferImageGranularity == 1 || IsEmpty())
10292  {
10293  return false;
10294  }
10295 
10296  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
10297  bool typeConflictFound = false;
10298  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
10299  it != m_Suballocations.cend();
10300  ++it)
10301  {
10302  const VmaSuballocationType suballocType = it->type;
10303  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
10304  {
10305  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
10306  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
10307  {
10308  typeConflictFound = true;
10309  }
10310  inOutPrevSuballocType = suballocType;
10311  }
10312  }
10313 
10314  return typeConflictFound || minAlignment >= bufferImageGranularity;
10315 }
10316 
10317 ////////////////////////////////////////////////////////////////////////////////
10318 // class VmaBlockMetadata_Linear
10319 
10320 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
10321  VmaBlockMetadata(hAllocator),
10322  m_SumFreeSize(0),
10323  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10324  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
10325  m_1stVectorIndex(0),
10326  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
10327  m_1stNullItemsBeginCount(0),
10328  m_1stNullItemsMiddleCount(0),
10329  m_2ndNullItemsCount(0)
10330 {
10331 }
10332 
10333 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
10334 {
10335 }
10336 
10337 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
10338 {
10339  VmaBlockMetadata::Init(size);
10340  m_SumFreeSize = size;
10341 }
10342 
10343 bool VmaBlockMetadata_Linear::Validate() const
10344 {
10345  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10346  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10347 
10348  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
10349  VMA_VALIDATE(!suballocations1st.empty() ||
10350  suballocations2nd.empty() ||
10351  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
10352 
10353  if(!suballocations1st.empty())
10354  {
10355  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
10356  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
10357  // Null item at the end should be just pop_back().
10358  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
10359  }
10360  if(!suballocations2nd.empty())
10361  {
10362  // Null item at the end should be just pop_back().
10363  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
10364  }
10365 
10366  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
10367  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
10368 
10369  VkDeviceSize sumUsedSize = 0;
10370  const size_t suballoc1stCount = suballocations1st.size();
10371  VkDeviceSize offset = VMA_DEBUG_MARGIN;
10372 
10373  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10374  {
10375  const size_t suballoc2ndCount = suballocations2nd.size();
10376  size_t nullItem2ndCount = 0;
10377  for(size_t i = 0; i < suballoc2ndCount; ++i)
10378  {
10379  const VmaSuballocation& suballoc = suballocations2nd[i];
10380  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10381 
10382  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10383  VMA_VALIDATE(suballoc.offset >= offset);
10384 
10385  if(!currFree)
10386  {
10387  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10388  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10389  sumUsedSize += suballoc.size;
10390  }
10391  else
10392  {
10393  ++nullItem2ndCount;
10394  }
10395 
10396  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10397  }
10398 
10399  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10400  }
10401 
10402  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
10403  {
10404  const VmaSuballocation& suballoc = suballocations1st[i];
10405  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
10406  suballoc.hAllocation == VK_NULL_HANDLE);
10407  }
10408 
10409  size_t nullItem1stCount = m_1stNullItemsBeginCount;
10410 
10411  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
10412  {
10413  const VmaSuballocation& suballoc = suballocations1st[i];
10414  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10415 
10416  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10417  VMA_VALIDATE(suballoc.offset >= offset);
10418  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
10419 
10420  if(!currFree)
10421  {
10422  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10423  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10424  sumUsedSize += suballoc.size;
10425  }
10426  else
10427  {
10428  ++nullItem1stCount;
10429  }
10430 
10431  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10432  }
10433  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
10434 
10435  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10436  {
10437  const size_t suballoc2ndCount = suballocations2nd.size();
10438  size_t nullItem2ndCount = 0;
10439  for(size_t i = suballoc2ndCount; i--; )
10440  {
10441  const VmaSuballocation& suballoc = suballocations2nd[i];
10442  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10443 
10444  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10445  VMA_VALIDATE(suballoc.offset >= offset);
10446 
10447  if(!currFree)
10448  {
10449  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10450  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10451  sumUsedSize += suballoc.size;
10452  }
10453  else
10454  {
10455  ++nullItem2ndCount;
10456  }
10457 
10458  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10459  }
10460 
10461  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10462  }
10463 
10464  VMA_VALIDATE(offset <= GetSize());
10465  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
10466 
10467  return true;
10468 }
10469 
10470 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
10471 {
10472  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
10473  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
10474 }
10475 
10476 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
10477 {
10478  const VkDeviceSize size = GetSize();
10479 
10480  /*
10481  We don't consider gaps inside the allocation vectors left by freed allocations,
10482  because the linear allocator cannot reuse them. Only space that is available
10483  for new allocations is considered.
10484  */
10485  if(IsEmpty())
10486  {
10487  return size;
10488  }
10489 
10490  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10491 
10492  switch(m_2ndVectorMode)
10493  {
10494  case SECOND_VECTOR_EMPTY:
10495  /*
10496  Available space is after end of 1st, as well as before beginning of 1st (which
10497  would make it a ring buffer).
10498  */
10499  {
10500  const size_t suballocations1stCount = suballocations1st.size();
10501  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
10502  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10503  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
10504  return VMA_MAX(
10505  firstSuballoc.offset,
10506  size - (lastSuballoc.offset + lastSuballoc.size));
10507  }
10508  break;
10509 
10510  case SECOND_VECTOR_RING_BUFFER:
10511  /*
10512  Available space is only between end of 2nd and beginning of 1st.
10513  */
10514  {
10515  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10516  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
10517  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
10518  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
10519  }
10520  break;
10521 
10522  case SECOND_VECTOR_DOUBLE_STACK:
10523  /*
10524  Available space is only between end of 1st and top of 2nd.
10525  */
10526  {
10527  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10528  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
10529  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
10530  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
10531  }
10532  break;
10533 
10534  default:
10535  VMA_ASSERT(0);
10536  return 0;
10537  }
10538 }
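
/*
Worked example of the three cases above (illustrative numbers only). Assume
GetSize() == 100:

- SECOND_VECTOR_EMPTY: 1st occupies [10, 70). The candidate gaps are the 10
  bytes before 1st and the 30 bytes after it, so the result is
  VMA_MAX(10, 100 - 70) = 30.
- SECOND_VECTOR_RING_BUFFER: 2nd ends at offset 25 and 1st begins at 40, so
  the only usable gap is 40 - 25 = 15.
- SECOND_VECTOR_DOUBLE_STACK: 1st ends at 60 and the top of 2nd starts at 90,
  so the only usable gap is 90 - 60 = 30.
*/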
10539 
10540 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10541 {
10542  const VkDeviceSize size = GetSize();
10543  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10544  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10545  const size_t suballoc1stCount = suballocations1st.size();
10546  const size_t suballoc2ndCount = suballocations2nd.size();
10547 
10548  outInfo.blockCount = 1;
10549  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10550  outInfo.unusedRangeCount = 0;
10551  outInfo.usedBytes = 0;
10552  outInfo.allocationSizeMin = UINT64_MAX;
10553  outInfo.allocationSizeMax = 0;
10554  outInfo.unusedRangeSizeMin = UINT64_MAX;
10555  outInfo.unusedRangeSizeMax = 0;
10556 
10557  VkDeviceSize lastOffset = 0;
10558 
10559  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10560  {
10561  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10562  size_t nextAlloc2ndIndex = 0;
10563  while(lastOffset < freeSpace2ndTo1stEnd)
10564  {
10565  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10566  while(nextAlloc2ndIndex < suballoc2ndCount &&
10567  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10568  {
10569  ++nextAlloc2ndIndex;
10570  }
10571 
10572  // Found non-null allocation.
10573  if(nextAlloc2ndIndex < suballoc2ndCount)
10574  {
10575  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10576 
10577  // 1. Process free space before this allocation.
10578  if(lastOffset < suballoc.offset)
10579  {
10580  // There is free space from lastOffset to suballoc.offset.
10581  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10582  ++outInfo.unusedRangeCount;
10583  outInfo.unusedBytes += unusedRangeSize;
10584  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10585  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10586  }
10587 
10588  // 2. Process this allocation.
10589  // There is allocation with suballoc.offset, suballoc.size.
10590  outInfo.usedBytes += suballoc.size;
10591  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10592  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10593 
10594  // 3. Prepare for next iteration.
10595  lastOffset = suballoc.offset + suballoc.size;
10596  ++nextAlloc2ndIndex;
10597  }
10598  // We are at the end.
10599  else
10600  {
10601  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10602  if(lastOffset < freeSpace2ndTo1stEnd)
10603  {
10604  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10605  ++outInfo.unusedRangeCount;
10606  outInfo.unusedBytes += unusedRangeSize;
10607  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10608  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10609  }
10610 
10611  // End of loop.
10612  lastOffset = freeSpace2ndTo1stEnd;
10613  }
10614  }
10615  }
10616 
10617  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10618  const VkDeviceSize freeSpace1stTo2ndEnd =
10619  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10620  while(lastOffset < freeSpace1stTo2ndEnd)
10621  {
10622  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10623  while(nextAlloc1stIndex < suballoc1stCount &&
10624  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10625  {
10626  ++nextAlloc1stIndex;
10627  }
10628 
10629  // Found non-null allocation.
10630  if(nextAlloc1stIndex < suballoc1stCount)
10631  {
10632  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10633 
10634  // 1. Process free space before this allocation.
10635  if(lastOffset < suballoc.offset)
10636  {
10637  // There is free space from lastOffset to suballoc.offset.
10638  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10639  ++outInfo.unusedRangeCount;
10640  outInfo.unusedBytes += unusedRangeSize;
10641  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10642  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10643  }
10644 
10645  // 2. Process this allocation.
10646  // There is allocation with suballoc.offset, suballoc.size.
10647  outInfo.usedBytes += suballoc.size;
10648  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10649  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10650 
10651  // 3. Prepare for next iteration.
10652  lastOffset = suballoc.offset + suballoc.size;
10653  ++nextAlloc1stIndex;
10654  }
10655  // We are at the end.
10656  else
10657  {
10658  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10659  if(lastOffset < freeSpace1stTo2ndEnd)
10660  {
10661  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10662  ++outInfo.unusedRangeCount;
10663  outInfo.unusedBytes += unusedRangeSize;
10664  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10665  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10666  }
10667 
10668  // End of loop.
10669  lastOffset = freeSpace1stTo2ndEnd;
10670  }
10671  }
10672 
10673  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10674  {
10675  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10676  while(lastOffset < size)
10677  {
10678  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10679  while(nextAlloc2ndIndex != SIZE_MAX &&
10680  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10681  {
10682  --nextAlloc2ndIndex;
10683  }
10684 
10685  // Found non-null allocation.
10686  if(nextAlloc2ndIndex != SIZE_MAX)
10687  {
10688  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10689 
10690  // 1. Process free space before this allocation.
10691  if(lastOffset < suballoc.offset)
10692  {
10693  // There is free space from lastOffset to suballoc.offset.
10694  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10695  ++outInfo.unusedRangeCount;
10696  outInfo.unusedBytes += unusedRangeSize;
10697  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10698  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10699  }
10700 
10701  // 2. Process this allocation.
10702  // There is allocation with suballoc.offset, suballoc.size.
10703  outInfo.usedBytes += suballoc.size;
10704  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10705  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10706 
10707  // 3. Prepare for next iteration.
10708  lastOffset = suballoc.offset + suballoc.size;
10709  --nextAlloc2ndIndex;
10710  }
10711  // We are at the end.
10712  else
10713  {
10714  // There is free space from lastOffset to size.
10715  if(lastOffset < size)
10716  {
10717  const VkDeviceSize unusedRangeSize = size - lastOffset;
10718  ++outInfo.unusedRangeCount;
10719  outInfo.unusedBytes += unusedRangeSize;
10720  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10721  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10722  }
10723 
10724  // End of loop.
10725  lastOffset = size;
10726  }
10727  }
10728  }
10729 
10730  outInfo.unusedBytes = size - outInfo.usedBytes;
10731 }
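
/*
How these per-block statistics reach the user (a hedged usage sketch;
`allocator` stands for a valid VmaAllocator created elsewhere):

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    // The VmaStatInfo filled above is aggregated per memory type, per heap,
    // and into the grand total:
    printf("used: %llu B, unused ranges: %u\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.unusedRangeCount);
*/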
10732 
10733 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10734 {
10735  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10736  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10737  const VkDeviceSize size = GetSize();
10738  const size_t suballoc1stCount = suballocations1st.size();
10739  const size_t suballoc2ndCount = suballocations2nd.size();
10740 
10741  inoutStats.size += size;
10742 
10743  VkDeviceSize lastOffset = 0;
10744 
10745  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10746  {
10747  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10748  size_t nextAlloc2ndIndex = 0; // Indexes suballocations2nd, so it must start at 0.
10749  while(lastOffset < freeSpace2ndTo1stEnd)
10750  {
10751  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10752  while(nextAlloc2ndIndex < suballoc2ndCount &&
10753  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10754  {
10755  ++nextAlloc2ndIndex;
10756  }
10757 
10758  // Found non-null allocation.
10759  if(nextAlloc2ndIndex < suballoc2ndCount)
10760  {
10761  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10762 
10763  // 1. Process free space before this allocation.
10764  if(lastOffset < suballoc.offset)
10765  {
10766  // There is free space from lastOffset to suballoc.offset.
10767  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10768  inoutStats.unusedSize += unusedRangeSize;
10769  ++inoutStats.unusedRangeCount;
10770  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10771  }
10772 
10773  // 2. Process this allocation.
10774  // There is allocation with suballoc.offset, suballoc.size.
10775  ++inoutStats.allocationCount;
10776 
10777  // 3. Prepare for next iteration.
10778  lastOffset = suballoc.offset + suballoc.size;
10779  ++nextAlloc2ndIndex;
10780  }
10781  // We are at the end.
10782  else
10783  {
10784  if(lastOffset < freeSpace2ndTo1stEnd)
10785  {
10786  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10787  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10788  inoutStats.unusedSize += unusedRangeSize;
10789  ++inoutStats.unusedRangeCount;
10790  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10791  }
10792 
10793  // End of loop.
10794  lastOffset = freeSpace2ndTo1stEnd;
10795  }
10796  }
10797  }
10798 
10799  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10800  const VkDeviceSize freeSpace1stTo2ndEnd =
10801  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10802  while(lastOffset < freeSpace1stTo2ndEnd)
10803  {
10804  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10805  while(nextAlloc1stIndex < suballoc1stCount &&
10806  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10807  {
10808  ++nextAlloc1stIndex;
10809  }
10810 
10811  // Found non-null allocation.
10812  if(nextAlloc1stIndex < suballoc1stCount)
10813  {
10814  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10815 
10816  // 1. Process free space before this allocation.
10817  if(lastOffset < suballoc.offset)
10818  {
10819  // There is free space from lastOffset to suballoc.offset.
10820  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10821  inoutStats.unusedSize += unusedRangeSize;
10822  ++inoutStats.unusedRangeCount;
10823  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10824  }
10825 
10826  // 2. Process this allocation.
10827  // There is allocation with suballoc.offset, suballoc.size.
10828  ++inoutStats.allocationCount;
10829 
10830  // 3. Prepare for next iteration.
10831  lastOffset = suballoc.offset + suballoc.size;
10832  ++nextAlloc1stIndex;
10833  }
10834  // We are at the end.
10835  else
10836  {
10837  if(lastOffset < freeSpace1stTo2ndEnd)
10838  {
10839  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10840  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10841  inoutStats.unusedSize += unusedRangeSize;
10842  ++inoutStats.unusedRangeCount;
10843  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10844  }
10845 
10846  // End of loop.
10847  lastOffset = freeSpace1stTo2ndEnd;
10848  }
10849  }
10850 
10851  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10852  {
10853  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10854  while(lastOffset < size)
10855  {
10856  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10857  while(nextAlloc2ndIndex != SIZE_MAX &&
10858  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10859  {
10860  --nextAlloc2ndIndex;
10861  }
10862 
10863  // Found non-null allocation.
10864  if(nextAlloc2ndIndex != SIZE_MAX)
10865  {
10866  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10867 
10868  // 1. Process free space before this allocation.
10869  if(lastOffset < suballoc.offset)
10870  {
10871  // There is free space from lastOffset to suballoc.offset.
10872  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10873  inoutStats.unusedSize += unusedRangeSize;
10874  ++inoutStats.unusedRangeCount;
10875  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10876  }
10877 
10878  // 2. Process this allocation.
10879  // There is allocation with suballoc.offset, suballoc.size.
10880  ++inoutStats.allocationCount;
10881 
10882  // 3. Prepare for next iteration.
10883  lastOffset = suballoc.offset + suballoc.size;
10884  --nextAlloc2ndIndex;
10885  }
10886  // We are at the end.
10887  else
10888  {
10889  if(lastOffset < size)
10890  {
10891  // There is free space from lastOffset to size.
10892  const VkDeviceSize unusedRangeSize = size - lastOffset;
10893  inoutStats.unusedSize += unusedRangeSize;
10894  ++inoutStats.unusedRangeCount;
10895  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10896  }
10897 
10898  // End of loop.
10899  lastOffset = size;
10900  }
10901  }
10902  }
10903 }
10904 
10905 #if VMA_STATS_STRING_ENABLED
10906 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10907 {
10908  const VkDeviceSize size = GetSize();
10909  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10910  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10911  const size_t suballoc1stCount = suballocations1st.size();
10912  const size_t suballoc2ndCount = suballocations2nd.size();
10913 
10914  // FIRST PASS
10915 
10916  size_t unusedRangeCount = 0;
10917  VkDeviceSize usedBytes = 0;
10918 
10919  VkDeviceSize lastOffset = 0;
10920 
10921  size_t alloc2ndCount = 0;
10922  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10923  {
10924  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10925  size_t nextAlloc2ndIndex = 0;
10926  while(lastOffset < freeSpace2ndTo1stEnd)
10927  {
10928  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10929  while(nextAlloc2ndIndex < suballoc2ndCount &&
10930  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10931  {
10932  ++nextAlloc2ndIndex;
10933  }
10934 
10935  // Found non-null allocation.
10936  if(nextAlloc2ndIndex < suballoc2ndCount)
10937  {
10938  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10939 
10940  // 1. Process free space before this allocation.
10941  if(lastOffset < suballoc.offset)
10942  {
10943  // There is free space from lastOffset to suballoc.offset.
10944  ++unusedRangeCount;
10945  }
10946 
10947  // 2. Process this allocation.
10948  // There is allocation with suballoc.offset, suballoc.size.
10949  ++alloc2ndCount;
10950  usedBytes += suballoc.size;
10951 
10952  // 3. Prepare for next iteration.
10953  lastOffset = suballoc.offset + suballoc.size;
10954  ++nextAlloc2ndIndex;
10955  }
10956  // We are at the end.
10957  else
10958  {
10959  if(lastOffset < freeSpace2ndTo1stEnd)
10960  {
10961  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10962  ++unusedRangeCount;
10963  }
10964 
10965  // End of loop.
10966  lastOffset = freeSpace2ndTo1stEnd;
10967  }
10968  }
10969  }
10970 
10971  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10972  size_t alloc1stCount = 0;
10973  const VkDeviceSize freeSpace1stTo2ndEnd =
10974  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10975  while(lastOffset < freeSpace1stTo2ndEnd)
10976  {
10977  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10978  while(nextAlloc1stIndex < suballoc1stCount &&
10979  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10980  {
10981  ++nextAlloc1stIndex;
10982  }
10983 
10984  // Found non-null allocation.
10985  if(nextAlloc1stIndex < suballoc1stCount)
10986  {
10987  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10988 
10989  // 1. Process free space before this allocation.
10990  if(lastOffset < suballoc.offset)
10991  {
10992  // There is free space from lastOffset to suballoc.offset.
10993  ++unusedRangeCount;
10994  }
10995 
10996  // 2. Process this allocation.
10997  // There is allocation with suballoc.offset, suballoc.size.
10998  ++alloc1stCount;
10999  usedBytes += suballoc.size;
11000 
11001  // 3. Prepare for next iteration.
11002  lastOffset = suballoc.offset + suballoc.size;
11003  ++nextAlloc1stIndex;
11004  }
11005  // We are at the end.
11006  else
11007  {
11008  if(lastOffset < freeSpace1stTo2ndEnd)
11009  {
11010  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
11011  ++unusedRangeCount;
11012  }
11013 
11014  // End of loop.
11015  lastOffset = freeSpace1stTo2ndEnd;
11016  }
11017  }
11018 
11019  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11020  {
11021  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
11022  while(lastOffset < size)
11023  {
11024  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11025  while(nextAlloc2ndIndex != SIZE_MAX &&
11026  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11027  {
11028  --nextAlloc2ndIndex;
11029  }
11030 
11031  // Found non-null allocation.
11032  if(nextAlloc2ndIndex != SIZE_MAX)
11033  {
11034  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11035 
11036  // 1. Process free space before this allocation.
11037  if(lastOffset < suballoc.offset)
11038  {
11039  // There is free space from lastOffset to suballoc.offset.
11040  ++unusedRangeCount;
11041  }
11042 
11043  // 2. Process this allocation.
11044  // There is allocation with suballoc.offset, suballoc.size.
11045  ++alloc2ndCount;
11046  usedBytes += suballoc.size;
11047 
11048  // 3. Prepare for next iteration.
11049  lastOffset = suballoc.offset + suballoc.size;
11050  --nextAlloc2ndIndex;
11051  }
11052  // We are at the end.
11053  else
11054  {
11055  if(lastOffset < size)
11056  {
11057  // There is free space from lastOffset to size.
11058  ++unusedRangeCount;
11059  }
11060 
11061  // End of loop.
11062  lastOffset = size;
11063  }
11064  }
11065  }
11066 
11067  const VkDeviceSize unusedBytes = size - usedBytes;
11068  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
11069 
11070  // SECOND PASS
11071  lastOffset = 0;
11072 
11073  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11074  {
11075  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
11076  size_t nextAlloc2ndIndex = 0;
11077  while(lastOffset < freeSpace2ndTo1stEnd)
11078  {
11079  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11080  while(nextAlloc2ndIndex < suballoc2ndCount &&
11081  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11082  {
11083  ++nextAlloc2ndIndex;
11084  }
11085 
11086  // Found non-null allocation.
11087  if(nextAlloc2ndIndex < suballoc2ndCount)
11088  {
11089  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11090 
11091  // 1. Process free space before this allocation.
11092  if(lastOffset < suballoc.offset)
11093  {
11094  // There is free space from lastOffset to suballoc.offset.
11095  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11096  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11097  }
11098 
11099  // 2. Process this allocation.
11100  // There is allocation with suballoc.offset, suballoc.size.
11101  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11102 
11103  // 3. Prepare for next iteration.
11104  lastOffset = suballoc.offset + suballoc.size;
11105  ++nextAlloc2ndIndex;
11106  }
11107  // We are at the end.
11108  else
11109  {
11110  if(lastOffset < freeSpace2ndTo1stEnd)
11111  {
11112  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
11113  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
11114  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11115  }
11116 
11117  // End of loop.
11118  lastOffset = freeSpace2ndTo1stEnd;
11119  }
11120  }
11121  }
11122 
11123  nextAlloc1stIndex = m_1stNullItemsBeginCount;
11124  while(lastOffset < freeSpace1stTo2ndEnd)
11125  {
11126  // Find next non-null allocation or move nextAlloc1stIndex to the end.
11127  while(nextAlloc1stIndex < suballoc1stCount &&
11128  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
11129  {
11130  ++nextAlloc1stIndex;
11131  }
11132 
11133  // Found non-null allocation.
11134  if(nextAlloc1stIndex < suballoc1stCount)
11135  {
11136  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
11137 
11138  // 1. Process free space before this allocation.
11139  if(lastOffset < suballoc.offset)
11140  {
11141  // There is free space from lastOffset to suballoc.offset.
11142  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11143  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11144  }
11145 
11146  // 2. Process this allocation.
11147  // There is allocation with suballoc.offset, suballoc.size.
11148  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11149 
11150  // 3. Prepare for next iteration.
11151  lastOffset = suballoc.offset + suballoc.size;
11152  ++nextAlloc1stIndex;
11153  }
11154  // We are at the end.
11155  else
11156  {
11157  if(lastOffset < freeSpace1stTo2ndEnd)
11158  {
11159  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
11160  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
11161  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11162  }
11163 
11164  // End of loop.
11165  lastOffset = freeSpace1stTo2ndEnd;
11166  }
11167  }
11168 
11169  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11170  {
11171  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
11172  while(lastOffset < size)
11173  {
11174  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
11175  while(nextAlloc2ndIndex != SIZE_MAX &&
11176  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
11177  {
11178  --nextAlloc2ndIndex;
11179  }
11180 
11181  // Found non-null allocation.
11182  if(nextAlloc2ndIndex != SIZE_MAX)
11183  {
11184  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
11185 
11186  // 1. Process free space before this allocation.
11187  if(lastOffset < suballoc.offset)
11188  {
11189  // There is free space from lastOffset to suballoc.offset.
11190  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
11191  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11192  }
11193 
11194  // 2. Process this allocation.
11195  // There is allocation with suballoc.offset, suballoc.size.
11196  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
11197 
11198  // 3. Prepare for next iteration.
11199  lastOffset = suballoc.offset + suballoc.size;
11200  --nextAlloc2ndIndex;
11201  }
11202  // We are at the end.
11203  else
11204  {
11205  if(lastOffset < size)
11206  {
11207  // There is free space from lastOffset to size.
11208  const VkDeviceSize unusedRangeSize = size - lastOffset;
11209  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
11210  }
11211 
11212  // End of loop.
11213  lastOffset = size;
11214  }
11215  }
11216  }
11217 
11218  PrintDetailedMap_End(json);
11219 }
11220 #endif // #if VMA_STATS_STRING_ENABLED
11221 
11222 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
11223  uint32_t currentFrameIndex,
11224  uint32_t frameInUseCount,
11225  VkDeviceSize bufferImageGranularity,
11226  VkDeviceSize allocSize,
11227  VkDeviceSize allocAlignment,
11228  bool upperAddress,
11229  VmaSuballocationType allocType,
11230  bool canMakeOtherLost,
11231  uint32_t strategy,
11232  VmaAllocationRequest* pAllocationRequest)
11233 {
11234  VMA_ASSERT(allocSize > 0);
11235  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
11236  VMA_ASSERT(pAllocationRequest != VMA_NULL);
11237  VMA_HEAVY_ASSERT(Validate());
11238  return upperAddress ?
11239  CreateAllocationRequest_UpperAddress(
11240  currentFrameIndex, frameInUseCount, bufferImageGranularity,
11241  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
11242  CreateAllocationRequest_LowerAddress(
11243  currentFrameIndex, frameInUseCount, bufferImageGranularity,
11244  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
11245 }
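
/*
How the upperAddress path is reached from the public API (usage sketch;
`allocator` and `memTypeIndex` are placeholders chosen elsewhere):

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024;
    poolCreateInfo.maxBlockCount = 1;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = pool;
    // This flag makes upperAddress true below, turning the linear block
    // into a double stack that also grows down from the end:
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
*/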
11246 
11247 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
11248  uint32_t currentFrameIndex,
11249  uint32_t frameInUseCount,
11250  VkDeviceSize bufferImageGranularity,
11251  VkDeviceSize allocSize,
11252  VkDeviceSize allocAlignment,
11253  VmaSuballocationType allocType,
11254  bool canMakeOtherLost,
11255  uint32_t strategy,
11256  VmaAllocationRequest* pAllocationRequest)
11257 {
11258  const VkDeviceSize size = GetSize();
11259  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11260  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11261 
11262  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11263  {
11264  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
11265  return false;
11266  }
11267 
11268  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
11269  if(allocSize > size)
11270  {
11271  return false;
11272  }
11273  VkDeviceSize resultBaseOffset = size - allocSize;
11274  if(!suballocations2nd.empty())
11275  {
11276  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11277  resultBaseOffset = lastSuballoc.offset - allocSize;
11278  if(allocSize > lastSuballoc.offset)
11279  {
11280  return false;
11281  }
11282  }
11283 
11284  // Start from offset equal to end of free space.
11285  VkDeviceSize resultOffset = resultBaseOffset;
11286 
11287  // Apply VMA_DEBUG_MARGIN at the end.
11288  if(VMA_DEBUG_MARGIN > 0)
11289  {
11290  if(resultOffset < VMA_DEBUG_MARGIN)
11291  {
11292  return false;
11293  }
11294  resultOffset -= VMA_DEBUG_MARGIN;
11295  }
11296 
11297  // Apply alignment.
11298  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
11299 
11300  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
11301  // Make bigger alignment if necessary.
11302  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
11303  {
11304  bool bufferImageGranularityConflict = false;
11305  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11306  {
11307  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11308  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11309  {
11310  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
11311  {
11312  bufferImageGranularityConflict = true;
11313  break;
11314  }
11315  }
11316  else
11317  // Already on previous page.
11318  break;
11319  }
11320  if(bufferImageGranularityConflict)
11321  {
11322  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
11323  }
11324  }
11325 
11326  // There is enough free space.
11327  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
11328  suballocations1st.back().offset + suballocations1st.back().size :
11329  0;
11330  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
11331  {
11332  // Check previous suballocations for BufferImageGranularity conflicts.
11333  // If conflict exists, allocation cannot be made here.
11334  if(bufferImageGranularity > 1)
11335  {
11336  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11337  {
11338  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11339  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11340  {
11341  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
11342  {
11343  return false;
11344  }
11345  }
11346  else
11347  {
11348  // Already on next page.
11349  break;
11350  }
11351  }
11352  }
11353 
11354  // All tests passed: Success.
11355  pAllocationRequest->offset = resultOffset;
11356  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
11357  pAllocationRequest->sumItemSize = 0;
11358  // pAllocationRequest->item unused.
11359  pAllocationRequest->itemsToMakeLostCount = 0;
11360  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
11361  return true;
11362  }
11363 
11364  return false;
11365 }
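
/*
Worked example of the offset math above (illustrative numbers): block size
1024, allocSize 100, allocAlignment 64, VMA_DEBUG_MARGIN 0, 2nd stack empty.
resultBaseOffset = 1024 - 100 = 924, then VmaAlignDown(924, 64) = 896, so
the allocation would occupy [896, 996) and succeeds only if the 1st vector
ends at or below offset 896 (endOf1st + VMA_DEBUG_MARGIN <= resultOffset).
*/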
11366 
11367 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
11368  uint32_t currentFrameIndex,
11369  uint32_t frameInUseCount,
11370  VkDeviceSize bufferImageGranularity,
11371  VkDeviceSize allocSize,
11372  VkDeviceSize allocAlignment,
11373  VmaSuballocationType allocType,
11374  bool canMakeOtherLost,
11375  uint32_t strategy,
11376  VmaAllocationRequest* pAllocationRequest)
11377 {
11378  const VkDeviceSize size = GetSize();
11379  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11380  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11381 
11382  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11383  {
11384  // Try to allocate at the end of 1st vector.
11385 
11386  VkDeviceSize resultBaseOffset = 0;
11387  if(!suballocations1st.empty())
11388  {
11389  const VmaSuballocation& lastSuballoc = suballocations1st.back();
11390  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11391  }
11392 
11393  // Start from offset equal to beginning of free space.
11394  VkDeviceSize resultOffset = resultBaseOffset;
11395 
11396  // Apply VMA_DEBUG_MARGIN at the beginning.
11397  if(VMA_DEBUG_MARGIN > 0)
11398  {
11399  resultOffset += VMA_DEBUG_MARGIN;
11400  }
11401 
11402  // Apply alignment.
11403  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11404 
11405  // Check previous suballocations for BufferImageGranularity conflicts.
11406  // Make bigger alignment if necessary.
11407  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
11408  {
11409  bool bufferImageGranularityConflict = false;
11410  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11411  {
11412  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11413  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11414  {
11415  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11416  {
11417  bufferImageGranularityConflict = true;
11418  break;
11419  }
11420  }
11421  else
11422  // Already on previous page.
11423  break;
11424  }
11425  if(bufferImageGranularityConflict)
11426  {
11427  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11428  }
11429  }
11430 
11431  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
11432  suballocations2nd.back().offset : size;
11433 
11434  // There is enough free space at the end after alignment.
11435  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
11436  {
11437  // Check next suballocations for BufferImageGranularity conflicts.
11438  // If conflict exists, allocation cannot be made here.
11439  if((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11440  {
11441  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11442  {
11443  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11444  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11445  {
11446  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11447  {
11448  return false;
11449  }
11450  }
11451  else
11452  {
11453  // Already on previous page.
11454  break;
11455  }
11456  }
11457  }
11458 
11459  // All tests passed: Success.
11460  pAllocationRequest->offset = resultOffset;
11461  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
11462  pAllocationRequest->sumItemSize = 0;
11463  // pAllocationRequest->item, customData unused.
11464  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
11465  pAllocationRequest->itemsToMakeLostCount = 0;
11466  return true;
11467  }
11468  }
11469 
11470  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
11471  // beginning of 1st vector as the end of free space.
11472  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11473  {
11474  VMA_ASSERT(!suballocations1st.empty());
11475 
11476  VkDeviceSize resultBaseOffset = 0;
11477  if(!suballocations2nd.empty())
11478  {
11479  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11480  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11481  }
11482 
11483  // Start from offset equal to beginning of free space.
11484  VkDeviceSize resultOffset = resultBaseOffset;
11485 
11486  // Apply VMA_DEBUG_MARGIN at the beginning.
11487  if(VMA_DEBUG_MARGIN > 0)
11488  {
11489  resultOffset += VMA_DEBUG_MARGIN;
11490  }
11491 
11492  // Apply alignment.
11493  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11494 
11495  // Check previous suballocations for BufferImageGranularity conflicts.
11496  // Make bigger alignment if necessary.
11497  if(bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
11498  {
11499  bool bufferImageGranularityConflict = false;
11500  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
11501  {
11502  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
11503  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11504  {
11505  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11506  {
11507  bufferImageGranularityConflict = true;
11508  break;
11509  }
11510  }
11511  else
11512  // Already on previous page.
11513  break;
11514  }
11515  if(bufferImageGranularityConflict)
11516  {
11517  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11518  }
11519  }
11520 
11521  pAllocationRequest->itemsToMakeLostCount = 0;
11522  pAllocationRequest->sumItemSize = 0;
11523  size_t index1st = m_1stNullItemsBeginCount;
11524 
11525  if(canMakeOtherLost)
11526  {
11527  while(index1st < suballocations1st.size() &&
11528  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
11529  {
11530  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
11531  const VmaSuballocation& suballoc = suballocations1st[index1st];
11532  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11533  {
11534  // No problem.
11535  }
11536  else
11537  {
11538  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11539  if(suballoc.hAllocation->CanBecomeLost() &&
11540  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11541  {
11542  ++pAllocationRequest->itemsToMakeLostCount;
11543  pAllocationRequest->sumItemSize += suballoc.size;
11544  }
11545  else
11546  {
11547  return false;
11548  }
11549  }
11550  ++index1st;
11551  }
11552 
11553  // Check next suballocations for BufferImageGranularity conflicts.
11554  // If conflict exists, we must mark more allocations lost or fail.
11555  if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
11556  {
11557  while(index1st < suballocations1st.size())
11558  {
11559  const VmaSuballocation& suballoc = suballocations1st[index1st];
11560  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11561  {
11562  if(suballoc.hAllocation != VK_NULL_HANDLE)
11563  {
11564  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11565  if(suballoc.hAllocation->CanBecomeLost() &&
11566  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11567  {
11568  ++pAllocationRequest->itemsToMakeLostCount;
11569  pAllocationRequest->sumItemSize += suballoc.size;
11570  }
11571  else
11572  {
11573  return false;
11574  }
11575  }
11576  }
11577  else
11578  {
11579  // Already on next page.
11580  break;
11581  }
11582  ++index1st;
11583  }
11584  }
11585 
11586  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
11587  if(index1st == suballocations1st.size() &&
11588  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11589  {
11590  // TODO: Known limitation: this case is not implemented yet, so the allocation request fails.
11591  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11592  }
11593  }
11594 
11595  // There is enough free space at the end after alignment.
11596  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11597  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11598  {
11599  // Check next suballocations for BufferImageGranularity conflicts.
11600  // If conflict exists, allocation cannot be made here.
11601  if(allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
11602  {
11603  for(size_t nextSuballocIndex = index1st;
11604  nextSuballocIndex < suballocations1st.size();
11605  nextSuballocIndex++)
11606  {
11607  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11608  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11609  {
11610  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11611  {
11612  return false;
11613  }
11614  }
11615  else
11616  {
11617  // Already on next page.
11618  break;
11619  }
11620  }
11621  }
11622 
11623  // All tests passed: Success.
11624  pAllocationRequest->offset = resultOffset;
11625  pAllocationRequest->sumFreeSize =
11626  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11627  - resultBaseOffset
11628  - pAllocationRequest->sumItemSize;
11629  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11630  // pAllocationRequest->item, customData unused.
11631  return true;
11632  }
11633  }
11634 
11635  return false;
11636 }
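
/*
Worked example of the wrap-around path above (illustrative numbers): block
size 1000, the 1st vector occupies [600, 1000), and the 2nd vector already
holds [0, 150) in ring-buffer mode. A new 200-byte request is placed at the
end of 2nd: resultBaseOffset = 150, and with VMA_DEBUG_MARGIN == 0 and no
extra alignment it succeeds because 150 + 200 <= 600, the offset where the
first used suballocation of 1st begins.
*/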
11637 
11638 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11639  uint32_t currentFrameIndex,
11640  uint32_t frameInUseCount,
11641  VmaAllocationRequest* pAllocationRequest)
11642 {
11643  if(pAllocationRequest->itemsToMakeLostCount == 0)
11644  {
11645  return true;
11646  }
11647 
11648  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11649 
11650  // We always start from 1st.
11651  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11652  size_t index = m_1stNullItemsBeginCount;
11653  size_t madeLostCount = 0;
11654  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11655  {
11656  if(index == suballocations->size())
11657  {
11658  index = 0;
11659  // Reached the end of the current vector: wrap around to the beginning of 2nd (ring buffer) or back to the start of 1st.
11660  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11661  {
11662  suballocations = &AccessSuballocations2nd();
11663  }
11664  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11665  // suballocations continues pointing at AccessSuballocations1st().
11666  VMA_ASSERT(!suballocations->empty());
11667  }
11668  VmaSuballocation& suballoc = (*suballocations)[index];
11669  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11670  {
11671  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11672  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11673  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11674  {
11675  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11676  suballoc.hAllocation = VK_NULL_HANDLE;
11677  m_SumFreeSize += suballoc.size;
11678  if(suballocations == &AccessSuballocations1st())
11679  {
11680  ++m_1stNullItemsMiddleCount;
11681  }
11682  else
11683  {
11684  ++m_2ndNullItemsCount;
11685  }
11686  ++madeLostCount;
11687  }
11688  else
11689  {
11690  return false;
11691  }
11692  }
11693  ++index;
11694  }
11695 
11696  CleanupAfterFree();
11697  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11698 
11699  return true;
11700 }
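
/*
Context for the lost-allocation machinery above (usage sketch; the frame
loop and the `alloc` handle are placeholders):

    // Once per frame:
    vmaSetCurrentFrameIndex(allocator, frameIndex);

    // Allocations created with this flag may be reclaimed ("made lost") by
    // the code above once unused for more than frameInUseCount frames:
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT;

    // Before using such an allocation each frame, verify it is still alive:
    if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
    {
        // Lost: the resource must be recreated.
    }
*/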
11701 
11702 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11703 {
11704  uint32_t lostAllocationCount = 0;
11705 
11706  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11707  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11708  {
11709  VmaSuballocation& suballoc = suballocations1st[i];
11710  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11711  suballoc.hAllocation->CanBecomeLost() &&
11712  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11713  {
11714  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11715  suballoc.hAllocation = VK_NULL_HANDLE;
11716  ++m_1stNullItemsMiddleCount;
11717  m_SumFreeSize += suballoc.size;
11718  ++lostAllocationCount;
11719  }
11720  }
11721 
11722  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11723  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11724  {
11725  VmaSuballocation& suballoc = suballocations2nd[i];
11726  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11727  suballoc.hAllocation->CanBecomeLost() &&
11728  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11729  {
11730  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11731  suballoc.hAllocation = VK_NULL_HANDLE;
11732  ++m_2ndNullItemsCount;
11733  m_SumFreeSize += suballoc.size;
11734  ++lostAllocationCount;
11735  }
11736  }
11737 
11738  if(lostAllocationCount)
11739  {
11740  CleanupAfterFree();
11741  }
11742 
11743  return lostAllocationCount;
11744 }
11745 
11746 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11747 {
11748  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11749  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11750  {
11751  const VmaSuballocation& suballoc = suballocations1st[i];
11752  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11753  {
11754  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11755  {
11756  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11757  return VK_ERROR_VALIDATION_FAILED_EXT;
11758  }
11759  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11760  {
11761  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11762  return VK_ERROR_VALIDATION_FAILED_EXT;
11763  }
11764  }
11765  }
11766 
11767  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11768  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11769  {
11770  const VmaSuballocation& suballoc = suballocations2nd[i];
11771  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11772  {
11773  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11774  {
11775  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11776  return VK_ERROR_VALIDATION_FAILED_EXT;
11777  }
11778  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11779  {
11780  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11781  return VK_ERROR_VALIDATION_FAILED_EXT;
11782  }
11783  }
11784  }
11785 
11786  return VK_SUCCESS;
11787 }
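
/*
How this check is typically enabled (configuration sketch): define the
following before including the implementation, so every allocation is
surrounded by a margin filled with a magic value that CheckCorruption()
can validate:

    #define VMA_DEBUG_MARGIN 16
    #define VMA_DEBUG_DETECT_CORRUPTION 1
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"

Then validate the margins periodically through the public entry point:

    VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
    // VK_ERROR_VALIDATION_FAILED_EXT means a corrupted margin was found.
*/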
11788 
11789 void VmaBlockMetadata_Linear::Alloc(
11790  const VmaAllocationRequest& request,
11791  VmaSuballocationType type,
11792  VkDeviceSize allocSize,
11793  VmaAllocation hAllocation)
11794 {
11795  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11796 
11797  switch(request.type)
11798  {
11799  case VmaAllocationRequestType::UpperAddress:
11800  {
11801  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11802  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11803  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11804  suballocations2nd.push_back(newSuballoc);
11805  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11806  }
11807  break;
11808  case VmaAllocationRequestType::EndOf1st:
11809  {
11810  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11811 
11812  VMA_ASSERT(suballocations1st.empty() ||
11813  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11814  // Check if it fits before the end of the block.
11815  VMA_ASSERT(request.offset + allocSize <= GetSize());
11816 
11817  suballocations1st.push_back(newSuballoc);
11818  }
11819  break;
11820  case VmaAllocationRequestType::EndOf2nd:
11821  {
11822  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11823  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11824  VMA_ASSERT(!suballocations1st.empty() &&
11825  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11826  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11827 
11828  switch(m_2ndVectorMode)
11829  {
11830  case SECOND_VECTOR_EMPTY:
11831  // First allocation from second part ring buffer.
11832  VMA_ASSERT(suballocations2nd.empty());
11833  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11834  break;
11835  case SECOND_VECTOR_RING_BUFFER:
11836  // 2-part ring buffer is already started.
11837  VMA_ASSERT(!suballocations2nd.empty());
11838  break;
11839  case SECOND_VECTOR_DOUBLE_STACK:
11840  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11841  break;
11842  default:
11843  VMA_ASSERT(0);
11844  }
11845 
11846  suballocations2nd.push_back(newSuballoc);
11847  }
11848  break;
11849  default:
11850  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11851  }
11852 
11853  m_SumFreeSize -= newSuballoc.size;
11854 }
11855 
11856 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11857 {
11858  FreeAtOffset(allocation->GetOffset());
11859 }
11860 
11861 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11862 {
11863  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11864  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11865 
11866  if(!suballocations1st.empty())
11867  {
11868  // First allocation: Mark it as next empty at the beginning.
11869  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11870  if(firstSuballoc.offset == offset)
11871  {
11872  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11873  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11874  m_SumFreeSize += firstSuballoc.size;
11875  ++m_1stNullItemsBeginCount;
11876  CleanupAfterFree();
11877  return;
11878  }
11879  }
11880 
11881  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11882  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11883  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11884  {
11885  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11886  if(lastSuballoc.offset == offset)
11887  {
11888  m_SumFreeSize += lastSuballoc.size;
11889  suballocations2nd.pop_back();
11890  CleanupAfterFree();
11891  return;
11892  }
11893  }
11894  // Last allocation in 1st vector.
11895  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11896  {
11897  VmaSuballocation& lastSuballoc = suballocations1st.back();
11898  if(lastSuballoc.offset == offset)
11899  {
11900  m_SumFreeSize += lastSuballoc.size;
11901  suballocations1st.pop_back();
11902  CleanupAfterFree();
11903  return;
11904  }
11905  }
11906 
11907  // Item from the middle of 1st vector.
11908  {
11909  VmaSuballocation refSuballoc;
11910  refSuballoc.offset = offset;
11911  // Rest of members stays uninitialized intentionally for better performance.
11912  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11913  suballocations1st.begin() + m_1stNullItemsBeginCount,
11914  suballocations1st.end(),
11915  refSuballoc,
11916  VmaSuballocationOffsetLess());
11917  if(it != suballocations1st.end())
11918  {
11919  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11920  it->hAllocation = VK_NULL_HANDLE;
11921  ++m_1stNullItemsMiddleCount;
11922  m_SumFreeSize += it->size;
11923  CleanupAfterFree();
11924  return;
11925  }
11926  }
11927 
11928  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11929  {
11930  // Item from the middle of 2nd vector.
11931  VmaSuballocation refSuballoc;
11932  refSuballoc.offset = offset;
11933  // Rest of members stays uninitialized intentionally for better performance.
11934  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11935  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11936  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11937  if(it != suballocations2nd.end())
11938  {
11939  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11940  it->hAllocation = VK_NULL_HANDLE;
11941  ++m_2ndNullItemsCount;
11942  m_SumFreeSize += it->size;
11943  CleanupAfterFree();
11944  return;
11945  }
11946  }
11947 
11948  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11949 }
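
/*
Note on the two comparators above: in ring-buffer mode the 2nd vector is
sorted by ascending offset, while in double-stack mode allocations grow
downward, so offsets are stored in descending order. A binary search is only
valid when its comparator matches the sort order, hence
VmaSuballocationOffsetLess for the ring buffer and
VmaSuballocationOffsetGreater for the double stack. The same idea with the
standard library (sketch only):

    // std::vector<VkDeviceSize> asc = {10, 40, 90};  // ascending offsets
    // std::lower_bound(asc.begin(), asc.end(), VkDeviceSize(40));
    // std::vector<VkDeviceSize> desc = {90, 40, 10}; // descending offsets
    // std::lower_bound(desc.begin(), desc.end(), VkDeviceSize(40),
    //     std::greater<VkDeviceSize>());
*/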
11950 
11951 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11952 {
11953  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11954  const size_t suballocCount = AccessSuballocations1st().size();
11955  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11956 }
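
/*
Worked example of the heuristic above: with 100 suballocations of which 64
are null items, 64 * 2 = 128 >= (100 - 64) * 3 = 108, so compaction runs.
With 40 null items, 40 * 2 = 80 < 60 * 3 = 180, so it does not. In words:
compact once null items make up roughly 60% of the vector (nullItemCount >=
1.5 * usedCount), and never for vectors of 32 items or fewer.
*/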
11957 
11958 void VmaBlockMetadata_Linear::CleanupAfterFree()
11959 {
11960  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11961  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11962 
11963  if(IsEmpty())
11964  {
11965  suballocations1st.clear();
11966  suballocations2nd.clear();
11967  m_1stNullItemsBeginCount = 0;
11968  m_1stNullItemsMiddleCount = 0;
11969  m_2ndNullItemsCount = 0;
11970  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11971  }
11972  else
11973  {
11974  const size_t suballoc1stCount = suballocations1st.size();
11975  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11976  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11977 
11978  // Find more null items at the beginning of 1st vector.
11979  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11980  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11981  {
11982  ++m_1stNullItemsBeginCount;
11983  --m_1stNullItemsMiddleCount;
11984  }
11985 
11986  // Find more null items at the end of 1st vector.
11987  while(m_1stNullItemsMiddleCount > 0 &&
11988  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11989  {
11990  --m_1stNullItemsMiddleCount;
11991  suballocations1st.pop_back();
11992  }
11993 
11994  // Find more null items at the end of 2nd vector.
11995  while(m_2ndNullItemsCount > 0 &&
11996  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11997  {
11998  --m_2ndNullItemsCount;
11999  suballocations2nd.pop_back();
12000  }
12001 
12002  // Find more null items at the beginning of 2nd vector.
12003  while(m_2ndNullItemsCount > 0 &&
12004  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
12005  {
12006  --m_2ndNullItemsCount;
12007  VmaVectorRemove(suballocations2nd, 0);
12008  }
12009 
12010  if(ShouldCompact1st())
12011  {
12012  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
12013  size_t srcIndex = m_1stNullItemsBeginCount;
12014  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
12015  {
12016  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
12017  {
12018  ++srcIndex;
12019  }
12020  if(dstIndex != srcIndex)
12021  {
12022  suballocations1st[dstIndex] = suballocations1st[srcIndex];
12023  }
12024  ++srcIndex;
12025  }
12026  suballocations1st.resize(nonNullItemCount);
12027  m_1stNullItemsBeginCount = 0;
12028  m_1stNullItemsMiddleCount = 0;
12029  }
12030 
12031  // 2nd vector became empty.
12032  if(suballocations2nd.empty())
12033  {
12034  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12035  }
12036 
12037  // 1st vector became empty.
12038  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
12039  {
12040  suballocations1st.clear();
12041  m_1stNullItemsBeginCount = 0;
12042 
12043  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
12044  {
12045  // Swap 1st with 2nd. Now 2nd is empty.
12046  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
12047  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
12048  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
12049  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
12050  {
12051  ++m_1stNullItemsBeginCount;
12052  --m_1stNullItemsMiddleCount;
12053  }
12054  m_2ndNullItemsCount = 0;
12055  m_1stVectorIndex ^= 1;
12056  }
12057  }
12058  }
12059 
12060  VMA_HEAVY_ASSERT(Validate());
12061 }
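
// Illustrative sketch (hypothetical usage, not part of the original file): the
// two-vector metadata maintained by CleanupAfterFree() above backs custom pools
// that use the linear algorithm. Minimal setup through the public API, assuming
// `allocator` and `memTypeIndex` were created/found by the application:
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//     poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
//     poolCreateInfo.blockSize = 64ull * 1024 * 1024; // One 64 MiB block.
//     poolCreateInfo.maxBlockCount = 1; // Single block, as ring-buffer usage requires.
//
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);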
12062 
12063 
12064 ////////////////////////////////////////////////////////////////////////////////
12065 // class VmaBlockMetadata_Buddy
12066 
12067 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
12068  VmaBlockMetadata(hAllocator),
12069  m_Root(VMA_NULL),
12070  m_AllocationCount(0),
12071  m_FreeCount(1),
12072  m_SumFreeSize(0)
12073 {
12074  memset(m_FreeList, 0, sizeof(m_FreeList));
12075 }
12076 
12077 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
12078 {
12079  DeleteNode(m_Root);
12080 }
12081 
12082 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
12083 {
12084  VmaBlockMetadata::Init(size);
12085 
12086  m_UsableSize = VmaPrevPow2(size);
12087  m_SumFreeSize = m_UsableSize;
12088 
12089  // Calculate m_LevelCount.
12090  m_LevelCount = 1;
12091  while(m_LevelCount < MAX_LEVELS &&
12092  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
12093  {
12094  ++m_LevelCount;
12095  }
12096 
12097  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
12098  rootNode->offset = 0;
12099  rootNode->type = Node::TYPE_FREE;
12100  rootNode->parent = VMA_NULL;
12101  rootNode->buddy = VMA_NULL;
12102 
12103  m_Root = rootNode;
12104  AddToFreeListFront(0, rootNode);
12105 }
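
// Illustrative sketch (standalone, not part of the original file): the
// level-count math performed by Init() above. MIN_NODE_SIZE and MAX_LEVELS
// mirror the constants of VmaBlockMetadata_Buddy; prevPow2 mirrors VmaPrevPow2.
//
//     #include <cstdint>
//
//     static uint64_t prevPow2(uint64_t v)
//     {
//         uint64_t result = 1;
//         while(result * 2 <= v) result *= 2;
//         return result;
//     }
//
//     static uint32_t CalcBuddyLevelCount(uint64_t blockSize)
//     {
//         const uint64_t MIN_NODE_SIZE = 32;
//         const uint32_t MAX_LEVELS = 30;
//         const uint64_t usableSize = prevPow2(blockSize); // Rounded down to power of 2.
//         uint32_t levelCount = 1;
//         // Level 0 is the whole usable block; each deeper level halves the node size.
//         while(levelCount < MAX_LEVELS &&
//             (usableSize >> levelCount) >= MIN_NODE_SIZE)
//         {
//             ++levelCount;
//         }
//         return levelCount;
//     }
//
// E.g. a 256 MiB block yields 24 levels: node sizes from 256 MiB down to 32 B.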
12106 
12107 bool VmaBlockMetadata_Buddy::Validate() const
12108 {
12109  // Validate tree.
12110  ValidationContext ctx;
12111  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
12112  {
12113  VMA_VALIDATE(false && "ValidateNode failed.");
12114  }
12115  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
12116  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
12117 
12118  // Validate free node lists.
12119  for(uint32_t level = 0; level < m_LevelCount; ++level)
12120  {
12121  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
12122  m_FreeList[level].front->free.prev == VMA_NULL);
12123 
12124  for(Node* node = m_FreeList[level].front;
12125  node != VMA_NULL;
12126  node = node->free.next)
12127  {
12128  VMA_VALIDATE(node->type == Node::TYPE_FREE);
12129 
12130  if(node->free.next == VMA_NULL)
12131  {
12132  VMA_VALIDATE(m_FreeList[level].back == node);
12133  }
12134  else
12135  {
12136  VMA_VALIDATE(node->free.next->free.prev == node);
12137  }
12138  }
12139  }
12140 
12141  // Validate that free lists at higher levels are empty.
12142  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
12143  {
12144  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
12145  }
12146 
12147  return true;
12148 }
12149 
12150 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
12151 {
12152  for(uint32_t level = 0; level < m_LevelCount; ++level)
12153  {
12154  if(m_FreeList[level].front != VMA_NULL)
12155  {
12156  return LevelToNodeSize(level);
12157  }
12158  }
12159  return 0;
12160 }
12161 
12162 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
12163 {
12164  const VkDeviceSize unusableSize = GetUnusableSize();
12165 
12166  outInfo.blockCount = 1;
12167 
12168  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
12169  outInfo.usedBytes = outInfo.unusedBytes = 0;
12170 
12171  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
12172  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
12173  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
12174 
12175  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
12176 
12177  if(unusableSize > 0)
12178  {
12179  ++outInfo.unusedRangeCount;
12180  outInfo.unusedBytes += unusableSize;
12181  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
12182  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
12183  }
12184 }
12185 
12186 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
12187 {
12188  const VkDeviceSize unusableSize = GetUnusableSize();
12189 
12190  inoutStats.size += GetSize();
12191  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
12192  inoutStats.allocationCount += m_AllocationCount;
12193  inoutStats.unusedRangeCount += m_FreeCount;
12194  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
12195 
12196  if(unusableSize > 0)
12197  {
12198  ++inoutStats.unusedRangeCount;
12199  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
12200  }
12201 }
12202 
12203 #if VMA_STATS_STRING_ENABLED
12204 
12205 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
12206 {
12207  // TODO optimize
12208  VmaStatInfo stat;
12209  CalcAllocationStatInfo(stat);
12210 
12211  PrintDetailedMap_Begin(
12212  json,
12213  stat.unusedBytes,
12214  stat.allocationCount,
12215  stat.unusedRangeCount);
12216 
12217  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
12218 
12219  const VkDeviceSize unusableSize = GetUnusableSize();
12220  if(unusableSize > 0)
12221  {
12222  PrintDetailedMap_UnusedRange(json,
12223  m_UsableSize, // offset
12224  unusableSize); // size
12225  }
12226 
12227  PrintDetailedMap_End(json);
12228 }
12229 
12230 #endif // #if VMA_STATS_STRING_ENABLED
12231 
12232 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
12233  uint32_t currentFrameIndex,
12234  uint32_t frameInUseCount,
12235  VkDeviceSize bufferImageGranularity,
12236  VkDeviceSize allocSize,
12237  VkDeviceSize allocAlignment,
12238  bool upperAddress,
12239  VmaSuballocationType allocType,
12240  bool canMakeOtherLost,
12241  uint32_t strategy,
12242  VmaAllocationRequest* pAllocationRequest)
12243 {
12244  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
12245 
12246  // Simple way to respect bufferImageGranularity. May be optimized some day.
12247  // Whenever it might be an OPTIMAL image...
12248  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
12249  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
12250  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
12251  {
12252  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
12253  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
12254  }
12255 
12256  if(allocSize > m_UsableSize)
12257  {
12258  return false;
12259  }
12260 
12261  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12262  for(uint32_t level = targetLevel + 1; level--; )
12263  {
12264  for(Node* freeNode = m_FreeList[level].front;
12265  freeNode != VMA_NULL;
12266  freeNode = freeNode->free.next)
12267  {
12268  if(freeNode->offset % allocAlignment == 0)
12269  {
12270  pAllocationRequest->type = VmaAllocationRequestType::Normal;
12271  pAllocationRequest->offset = freeNode->offset;
12272  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
12273  pAllocationRequest->sumItemSize = 0;
12274  pAllocationRequest->itemsToMakeLostCount = 0;
12275  pAllocationRequest->customData = (void*)(uintptr_t)level;
12276  return true;
12277  }
12278  }
12279  }
12280 
12281  return false;
12282 }
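
// Illustrative sketch (hypothetical usage, not part of the original file):
// enabling the buddy algorithm, whose free lists CreateAllocationRequest()
// above scans from the target level upward. Because node sizes are powers of
// two, sizes are rounded up internally - some memory is traded for very fast
// allocation and freeing. `allocator` and `memTypeIndex` are assumed:
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//     poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
//     poolCreateInfo.blockSize = 128ull * 1024 * 1024; // Power of 2 recommended.
//
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);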
12283 
12284 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
12285  uint32_t currentFrameIndex,
12286  uint32_t frameInUseCount,
12287  VmaAllocationRequest* pAllocationRequest)
12288 {
12289  /*
12290  Lost allocations are not supported in the buddy allocator at the moment.
12291  Support might be added in the future.
12292  */
12293  return pAllocationRequest->itemsToMakeLostCount == 0;
12294 }
12295 
12296 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
12297 {
12298  /*
12299  Lost allocations are not supported in the buddy allocator at the moment.
12300  Support might be added in the future.
12301  */
12302  return 0;
12303 }
12304 
12305 void VmaBlockMetadata_Buddy::Alloc(
12306  const VmaAllocationRequest& request,
12307  VmaSuballocationType type,
12308  VkDeviceSize allocSize,
12309  VmaAllocation hAllocation)
12310 {
12311  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
12312 
12313  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
12314  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
12315 
12316  Node* currNode = m_FreeList[currLevel].front;
12317  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12318  while(currNode->offset != request.offset)
12319  {
12320  currNode = currNode->free.next;
12321  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
12322  }
12323 
12324  // Go down, splitting free nodes.
12325  while(currLevel < targetLevel)
12326  {
12327  // currNode is already first free node at currLevel.
12328  // Remove it from list of free nodes at this currLevel.
12329  RemoveFromFreeList(currLevel, currNode);
12330 
12331  const uint32_t childrenLevel = currLevel + 1;
12332 
12333  // Create two free sub-nodes.
12334  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
12335  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
12336 
12337  leftChild->offset = currNode->offset;
12338  leftChild->type = Node::TYPE_FREE;
12339  leftChild->parent = currNode;
12340  leftChild->buddy = rightChild;
12341 
12342  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
12343  rightChild->type = Node::TYPE_FREE;
12344  rightChild->parent = currNode;
12345  rightChild->buddy = leftChild;
12346 
12347  // Convert current currNode to split type.
12348  currNode->type = Node::TYPE_SPLIT;
12349  currNode->split.leftChild = leftChild;
12350 
12351  // Add child nodes to free list. Order is important!
12352  AddToFreeListFront(childrenLevel, rightChild);
12353  AddToFreeListFront(childrenLevel, leftChild);
12354 
12355  ++m_FreeCount;
12356  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
12357  ++currLevel;
12358  currNode = m_FreeList[currLevel].front;
12359 
12360  /*
12361  We can be sure that currNode, as left child of node previously split,
12362  also fulfills the alignment requirement.
12363  */
12364  }
12365 
12366  // Remove from free list.
12367  VMA_ASSERT(currLevel == targetLevel &&
12368  currNode != VMA_NULL &&
12369  currNode->type == Node::TYPE_FREE);
12370  RemoveFromFreeList(currLevel, currNode);
12371 
12372  // Convert to allocation node.
12373  currNode->type = Node::TYPE_ALLOCATION;
12374  currNode->allocation.alloc = hAllocation;
12375 
12376  ++m_AllocationCount;
12377  --m_FreeCount;
12378  m_SumFreeSize -= allocSize;
12379 }
12380 
12381 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
12382 {
12383  if(node->type == Node::TYPE_SPLIT)
12384  {
12385  DeleteNode(node->split.leftChild->buddy);
12386  DeleteNode(node->split.leftChild);
12387  }
12388 
12389  vma_delete(GetAllocationCallbacks(), node);
12390 }
12391 
12392 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
12393 {
12394  VMA_VALIDATE(level < m_LevelCount);
12395  VMA_VALIDATE(curr->parent == parent);
12396  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
12397  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
12398  switch(curr->type)
12399  {
12400  case Node::TYPE_FREE:
12401  // curr->free.prev, next are validated separately.
12402  ctx.calculatedSumFreeSize += levelNodeSize;
12403  ++ctx.calculatedFreeCount;
12404  break;
12405  case Node::TYPE_ALLOCATION:
12406  ++ctx.calculatedAllocationCount;
12407  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
12408  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
12409  break;
12410  case Node::TYPE_SPLIT:
12411  {
12412  const uint32_t childrenLevel = level + 1;
12413  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
12414  const Node* const leftChild = curr->split.leftChild;
12415  VMA_VALIDATE(leftChild != VMA_NULL);
12416  VMA_VALIDATE(leftChild->offset == curr->offset);
12417  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
12418  {
12419  VMA_VALIDATE(false && "ValidateNode for left child failed.");
12420  }
12421  const Node* const rightChild = leftChild->buddy;
12422  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
12423  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
12424  {
12425  VMA_VALIDATE(false && "ValidateNode for right child failed.");
12426  }
12427  }
12428  break;
12429  default:
12430  return false;
12431  }
12432 
12433  return true;
12434 }
12435 
12436 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
12437 {
12438  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
12439  uint32_t level = 0;
12440  VkDeviceSize currLevelNodeSize = m_UsableSize;
12441  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
12442  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
12443  {
12444  ++level;
12445  currLevelNodeSize = nextLevelNodeSize;
12446  nextLevelNodeSize = currLevelNodeSize >> 1;
12447  }
12448  return level;
12449 }
12450 
12451 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
12452 {
12453  // Find node and level.
12454  Node* node = m_Root;
12455  VkDeviceSize nodeOffset = 0;
12456  uint32_t level = 0;
12457  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
12458  while(node->type == Node::TYPE_SPLIT)
12459  {
12460  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
12461  if(offset < nodeOffset + nextLevelSize)
12462  {
12463  node = node->split.leftChild;
12464  }
12465  else
12466  {
12467  node = node->split.leftChild->buddy;
12468  nodeOffset += nextLevelSize;
12469  }
12470  ++level;
12471  levelNodeSize = nextLevelSize;
12472  }
12473 
12474  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
12475  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
12476 
12477  ++m_FreeCount;
12478  --m_AllocationCount;
12479  m_SumFreeSize += alloc->GetSize();
12480 
12481  node->type = Node::TYPE_FREE;
12482 
12483  // Join free nodes if possible.
12484  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
12485  {
12486  RemoveFromFreeList(level, node->buddy);
12487  Node* const parent = node->parent;
12488 
12489  vma_delete(GetAllocationCallbacks(), node->buddy);
12490  vma_delete(GetAllocationCallbacks(), node);
12491  parent->type = Node::TYPE_FREE;
12492 
12493  node = parent;
12494  --level;
12495  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
12496  --m_FreeCount;
12497  }
12498 
12499  AddToFreeListFront(level, node);
12500 }
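
// Illustrative sketch (standalone, not part of the original file): VMA keeps
// explicit parent/buddy pointers, but the tree walk in FreeAtOffset() above
// follows the classic buddy geometry: at node size s, a node at offset o
// (within a power-of-2-sized block) has its buddy at o ^ s.
//
//     #include <cassert>
//     #include <cstdint>
//
//     static uint64_t BuddyOffset(uint64_t nodeOffset, uint64_t nodeSize)
//     {
//         // nodeOffset must be a multiple of nodeSize inside a power-of-2 block.
//         return nodeOffset ^ nodeSize;
//     }
//
//     int main()
//     {
//         // In a 256-byte block split into 64-byte nodes:
//         assert(BuddyOffset(0, 64) == 64);    // Left node's buddy lies to the right...
//         assert(BuddyOffset(64, 64) == 0);    // ...and vice versa.
//         assert(BuddyOffset(128, 64) == 192);
//         return 0;
//     }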
12501 
12502 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
12503 {
12504  switch(node->type)
12505  {
12506  case Node::TYPE_FREE:
12507  ++outInfo.unusedRangeCount;
12508  outInfo.unusedBytes += levelNodeSize;
12509  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
12510  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
12511  break;
12512  case Node::TYPE_ALLOCATION:
12513  {
12514  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12515  ++outInfo.allocationCount;
12516  outInfo.usedBytes += allocSize;
12517  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
12518  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
12519 
12520  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
12521  if(unusedRangeSize > 0)
12522  {
12523  ++outInfo.unusedRangeCount;
12524  outInfo.unusedBytes += unusedRangeSize;
12525  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
12526  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
12527  }
12528  }
12529  break;
12530  case Node::TYPE_SPLIT:
12531  {
12532  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12533  const Node* const leftChild = node->split.leftChild;
12534  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12535  const Node* const rightChild = leftChild->buddy;
12536  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12537  }
12538  break;
12539  default:
12540  VMA_ASSERT(0);
12541  }
12542 }
12543 
12544 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12545 {
12546  VMA_ASSERT(node->type == Node::TYPE_FREE);
12547 
12548  // List is empty.
12549  Node* const frontNode = m_FreeList[level].front;
12550  if(frontNode == VMA_NULL)
12551  {
12552  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12553  node->free.prev = node->free.next = VMA_NULL;
12554  m_FreeList[level].front = m_FreeList[level].back = node;
12555  }
12556  else
12557  {
12558  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12559  node->free.prev = VMA_NULL;
12560  node->free.next = frontNode;
12561  frontNode->free.prev = node;
12562  m_FreeList[level].front = node;
12563  }
12564 }
12565 
12566 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12567 {
12568  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12569 
12570  // It is at the front.
12571  if(node->free.prev == VMA_NULL)
12572  {
12573  VMA_ASSERT(m_FreeList[level].front == node);
12574  m_FreeList[level].front = node->free.next;
12575  }
12576  else
12577  {
12578  Node* const prevFreeNode = node->free.prev;
12579  VMA_ASSERT(prevFreeNode->free.next == node);
12580  prevFreeNode->free.next = node->free.next;
12581  }
12582 
12583  // It is at the back.
12584  if(node->free.next == VMA_NULL)
12585  {
12586  VMA_ASSERT(m_FreeList[level].back == node);
12587  m_FreeList[level].back = node->free.prev;
12588  }
12589  else
12590  {
12591  Node* const nextFreeNode = node->free.next;
12592  VMA_ASSERT(nextFreeNode->free.prev == node);
12593  nextFreeNode->free.prev = node->free.prev;
12594  }
12595 }
12596 
12597 #if VMA_STATS_STRING_ENABLED
12598 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12599 {
12600  switch(node->type)
12601  {
12602  case Node::TYPE_FREE:
12603  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12604  break;
12605  case Node::TYPE_ALLOCATION:
12606  {
12607  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12608  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12609  if(allocSize < levelNodeSize)
12610  {
12611  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12612  }
12613  }
12614  break;
12615  case Node::TYPE_SPLIT:
12616  {
12617  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12618  const Node* const leftChild = node->split.leftChild;
12619  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12620  const Node* const rightChild = leftChild->buddy;
12621  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12622  }
12623  break;
12624  default:
12625  VMA_ASSERT(0);
12626  }
12627 }
12628 #endif // #if VMA_STATS_STRING_ENABLED
12629 
12630 
12631 ////////////////////////////////////////////////////////////////////////////////
12632 // class VmaDeviceMemoryBlock
12633 
12634 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12635  m_pMetadata(VMA_NULL),
12636  m_MemoryTypeIndex(UINT32_MAX),
12637  m_Id(0),
12638  m_hMemory(VK_NULL_HANDLE),
12639  m_MapCount(0),
12640  m_pMappedData(VMA_NULL)
12641 {
12642 }
12643 
12644 void VmaDeviceMemoryBlock::Init(
12645  VmaAllocator hAllocator,
12646  VmaPool hParentPool,
12647  uint32_t newMemoryTypeIndex,
12648  VkDeviceMemory newMemory,
12649  VkDeviceSize newSize,
12650  uint32_t id,
12651  uint32_t algorithm)
12652 {
12653  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12654 
12655  m_hParentPool = hParentPool;
12656  m_MemoryTypeIndex = newMemoryTypeIndex;
12657  m_Id = id;
12658  m_hMemory = newMemory;
12659 
12660  switch(algorithm)
12661  {
12662  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
12663  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
12664  break;
12665  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
12666  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
12667  break;
12668  default:
12669  VMA_ASSERT(0);
12670  // Fall-through.
12671  case 0:
12672  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12673  }
12674  m_pMetadata->Init(newSize);
12675 }
12676 
12677 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12678 {
12679  // This is the most important assert in the entire library.
12680  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12681  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12682 
12683  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12684  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12685  m_hMemory = VK_NULL_HANDLE;
12686 
12687  vma_delete(allocator, m_pMetadata);
12688  m_pMetadata = VMA_NULL;
12689 }
12690 
12691 bool VmaDeviceMemoryBlock::Validate() const
12692 {
12693  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12694  (m_pMetadata->GetSize() != 0));
12695 
12696  return m_pMetadata->Validate();
12697 }
12698 
12699 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12700 {
12701  void* pData = nullptr;
12702  VkResult res = Map(hAllocator, 1, &pData);
12703  if(res != VK_SUCCESS)
12704  {
12705  return res;
12706  }
12707 
12708  res = m_pMetadata->CheckCorruption(pData);
12709 
12710  Unmap(hAllocator, 1);
12711 
12712  return res;
12713 }
12714 
12715 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12716 {
12717  if(count == 0)
12718  {
12719  return VK_SUCCESS;
12720  }
12721 
12722  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12723  if(m_MapCount != 0)
12724  {
12725  m_MapCount += count;
12726  VMA_ASSERT(m_pMappedData != VMA_NULL);
12727  if(ppData != VMA_NULL)
12728  {
12729  *ppData = m_pMappedData;
12730  }
12731  return VK_SUCCESS;
12732  }
12733  else
12734  {
12735  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12736  hAllocator->m_hDevice,
12737  m_hMemory,
12738  0, // offset
12739  VK_WHOLE_SIZE,
12740  0, // flags
12741  &m_pMappedData);
12742  if(result == VK_SUCCESS)
12743  {
12744  if(ppData != VMA_NULL)
12745  {
12746  *ppData = m_pMappedData;
12747  }
12748  m_MapCount = count;
12749  }
12750  return result;
12751  }
12752 }
12753 
12754 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12755 {
12756  if(count == 0)
12757  {
12758  return;
12759  }
12760 
12761  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12762  if(m_MapCount >= count)
12763  {
12764  m_MapCount -= count;
12765  if(m_MapCount == 0)
12766  {
12767  m_pMappedData = VMA_NULL;
12768  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12769  }
12770  }
12771  else
12772  {
12773  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12774  }
12775 }
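
// Illustrative sketch (hypothetical usage, not part of the original file): the
// public counterpart of the reference-counted Map()/Unmap() above. Nested
// mappings of allocations that share a block are legal; vkMapMemory runs only
// when the block's counter goes 0 -> 1. `allocator`, `allocation`, `myData`
// and `myDataSize` are assumed to exist in the application:
//
//     void* pData = nullptr;
//     VkResult res = vmaMapMemory(allocator, allocation, &pData);
//     if(res == VK_SUCCESS)
//     {
//         memcpy(pData, myData, (size_t)myDataSize);
//         vmaUnmapMemory(allocator, allocation); // Decrements the block's map count.
//     }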
12776 
12777 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12778 {
12779  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12780  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12781 
12782  void* pData;
12783  VkResult res = Map(hAllocator, 1, &pData);
12784  if(res != VK_SUCCESS)
12785  {
12786  return res;
12787  }
12788 
12789  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12790  VmaWriteMagicValue(pData, allocOffset + allocSize);
12791 
12792  Unmap(hAllocator, 1);
12793 
12794  return VK_SUCCESS;
12795 }
12796 
12797 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12798 {
12799  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12800  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12801 
12802  void* pData;
12803  VkResult res = Map(hAllocator, 1, &pData);
12804  if(res != VK_SUCCESS)
12805  {
12806  return res;
12807  }
12808 
12809  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12810  {
12811  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12812  }
12813  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12814  {
12815  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12816  }
12817 
12818  Unmap(hAllocator, 1);
12819 
12820  return VK_SUCCESS;
12821 }
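
// Illustrative sketch (hypothetical configuration, not part of the original
// file): WriteMagicValueAroundAllocation()/ValidateMagicValueAroundAllocation()
// above only run when margins and corruption detection are compiled in. This
// goes in the single .cpp file that defines the implementation:
//
//     #define VMA_DEBUG_MARGIN 16            // Bytes of guard space around allocations.
//     #define VMA_DEBUG_DETECT_CORRUPTION 1  // Fill margins with magic values and validate.
//     #define VMA_IMPLEMENTATION
//     #include "vk_mem_alloc.h"
//
// Validation can also be requested explicitly at any point; an assert fires if
// a margin was overwritten:
//
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX /* all memory types */);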
12822 
12823 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12824  const VmaAllocator hAllocator,
12825  const VmaAllocation hAllocation,
12826  VkDeviceSize allocationLocalOffset,
12827  VkBuffer hBuffer,
12828  const void* pNext)
12829 {
12830  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12831  hAllocation->GetBlock() == this);
12832  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12833  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12834  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12835  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12836  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12837  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12838 }
12839 
12840 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12841  const VmaAllocator hAllocator,
12842  const VmaAllocation hAllocation,
12843  VkDeviceSize allocationLocalOffset,
12844  VkImage hImage,
12845  const void* pNext)
12846 {
12847  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12848  hAllocation->GetBlock() == this);
12849  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12850  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12851  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12852  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12853  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12854  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12855 }
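
// Illustrative sketch (hypothetical usage, not part of the original file):
// binding a caller-created buffer through the public API, which routes to
// BindBufferMemory() above. The "2" variant accepts an extra local offset and
// a pNext chain. `allocator`, `allocation`, and `buffer` are assumed:
//
//     VkResult res = vmaBindBufferMemory2(
//         allocator,
//         allocation,
//         0,        // allocationLocalOffset - relative to the allocation, not the block.
//         buffer,
//         nullptr); // pNext - e.g. VkBindBufferMemoryDeviceGroupInfo.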
12856 
12857 static void InitStatInfo(VmaStatInfo& outInfo)
12858 {
12859  memset(&outInfo, 0, sizeof(outInfo));
12860  outInfo.allocationSizeMin = UINT64_MAX;
12861  outInfo.unusedRangeSizeMin = UINT64_MAX;
12862 }
12863 
12864 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12865 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12866 {
12867  inoutInfo.blockCount += srcInfo.blockCount;
12868  inoutInfo.allocationCount += srcInfo.allocationCount;
12869  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12870  inoutInfo.usedBytes += srcInfo.usedBytes;
12871  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12872  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12873  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12874  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12875  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12876 }
12877 
12878 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12879 {
12880  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12881  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12882  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12883  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12884 }
12885 
12886 VmaPool_T::VmaPool_T(
12887  VmaAllocator hAllocator,
12888  const VmaPoolCreateInfo& createInfo,
12889  VkDeviceSize preferredBlockSize) :
12890  m_BlockVector(
12891  hAllocator,
12892  this, // hParentPool
12893  createInfo.memoryTypeIndex,
12894  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12895  createInfo.minBlockCount,
12896  createInfo.maxBlockCount,
12897  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12898  createInfo.frameInUseCount,
12899  createInfo.blockSize != 0, // explicitBlockSize
12900  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
12901  createInfo.priority,
12902  VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
12903  createInfo.pMemoryAllocateNext),
12904  m_Id(0),
12905  m_Name(VMA_NULL)
12906 {
12907 }
12908 
12909 VmaPool_T::~VmaPool_T()
12910 {
12911  VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
12912 }
12913 
12914 void VmaPool_T::SetName(const char* pName)
12915 {
12916  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12917  VmaFreeString(allocs, m_Name);
12918 
12919  if(pName != VMA_NULL)
12920  {
12921  m_Name = VmaCreateStringCopy(allocs, pName);
12922  }
12923  else
12924  {
12925  m_Name = VMA_NULL;
12926  }
12927 }
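
// Illustrative sketch (hypothetical usage, not part of the original file):
// SetName() above is reached through the public API. The string is copied with
// the allocator's callbacks, so a temporary is fine; pool names appear in JSON
// dumps produced by vmaBuildStatsString(). `allocator` and `pool` are assumed:
//
//     vmaSetPoolName(allocator, pool, "Mesh vertex buffers");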
12928 
12929 #if VMA_STATS_STRING_ENABLED
12930 
12931 #endif // #if VMA_STATS_STRING_ENABLED
12932 
12933 VmaBlockVector::VmaBlockVector(
12934  VmaAllocator hAllocator,
12935  VmaPool hParentPool,
12936  uint32_t memoryTypeIndex,
12937  VkDeviceSize preferredBlockSize,
12938  size_t minBlockCount,
12939  size_t maxBlockCount,
12940  VkDeviceSize bufferImageGranularity,
12941  uint32_t frameInUseCount,
12942  bool explicitBlockSize,
12943  uint32_t algorithm,
12944  float priority,
12945  VkDeviceSize minAllocationAlignment,
12946  void* pMemoryAllocateNext) :
12947  m_hAllocator(hAllocator),
12948  m_hParentPool(hParentPool),
12949  m_MemoryTypeIndex(memoryTypeIndex),
12950  m_PreferredBlockSize(preferredBlockSize),
12951  m_MinBlockCount(minBlockCount),
12952  m_MaxBlockCount(maxBlockCount),
12953  m_BufferImageGranularity(bufferImageGranularity),
12954  m_FrameInUseCount(frameInUseCount),
12955  m_ExplicitBlockSize(explicitBlockSize),
12956  m_Algorithm(algorithm),
12957  m_Priority(priority),
12958  m_MinAllocationAlignment(minAllocationAlignment),
12959  m_pMemoryAllocateNext(pMemoryAllocateNext),
12960  m_HasEmptyBlock(false),
12961  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12962  m_NextBlockId(0)
12963 {
12964 }
12965 
12966 VmaBlockVector::~VmaBlockVector()
12967 {
12968  for(size_t i = m_Blocks.size(); i--; )
12969  {
12970  m_Blocks[i]->Destroy(m_hAllocator);
12971  vma_delete(m_hAllocator, m_Blocks[i]);
12972  }
12973 }
12974 
12975 VkResult VmaBlockVector::CreateMinBlocks()
12976 {
12977  for(size_t i = 0; i < m_MinBlockCount; ++i)
12978  {
12979  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12980  if(res != VK_SUCCESS)
12981  {
12982  return res;
12983  }
12984  }
12985  return VK_SUCCESS;
12986 }
12987 
12988 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12989 {
12990  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12991 
12992  const size_t blockCount = m_Blocks.size();
12993 
12994  pStats->size = 0;
12995  pStats->unusedSize = 0;
12996  pStats->allocationCount = 0;
12997  pStats->unusedRangeCount = 0;
12998  pStats->unusedRangeSizeMax = 0;
12999  pStats->blockCount = blockCount;
13000 
13001  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13002  {
13003  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13004  VMA_ASSERT(pBlock);
13005  VMA_HEAVY_ASSERT(pBlock->Validate());
13006  pBlock->m_pMetadata->AddPoolStats(*pStats);
13007  }
13008 }
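
// Illustrative sketch (hypothetical usage, not part of the original file):
// retrieving the aggregated numbers that GetPoolStats() above accumulates
// across all blocks of a custom pool. `allocator` and `pool` are assumed,
// and <cstdio> is assumed included:
//
//     VmaPoolStats stats = {};
//     vmaGetPoolStats(allocator, pool, &stats);
//     printf("Pool: %llu / %llu bytes unused in %zu blocks, largest free range %llu B\n",
//         (unsigned long long)stats.unusedSize,
//         (unsigned long long)stats.size,
//         stats.blockCount,
//         (unsigned long long)stats.unusedRangeSizeMax);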
13009 
13010 bool VmaBlockVector::IsEmpty()
13011 {
13012  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13013  return m_Blocks.empty();
13014 }
13015 
13016 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
13017 {
13018  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
13019  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
13020  (VMA_DEBUG_MARGIN > 0) &&
13021  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
13022  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
13023 }
13024 
13025 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
13026 
13027 VkResult VmaBlockVector::Allocate(
13028  uint32_t currentFrameIndex,
13029  VkDeviceSize size,
13030  VkDeviceSize alignment,
13031  const VmaAllocationCreateInfo& createInfo,
13032  VmaSuballocationType suballocType,
13033  size_t allocationCount,
13034  VmaAllocation* pAllocations)
13035 {
13036  size_t allocIndex;
13037  VkResult res = VK_SUCCESS;
13038 
13039  alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
13040 
13041  if(IsCorruptionDetectionEnabled())
13042  {
13043  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
13044  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
13045  }
13046 
13047  {
13048  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13049  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
13050  {
13051  res = AllocatePage(
13052  currentFrameIndex,
13053  size,
13054  alignment,
13055  createInfo,
13056  suballocType,
13057  pAllocations + allocIndex);
13058  if(res != VK_SUCCESS)
13059  {
13060  break;
13061  }
13062  }
13063  }
13064 
13065  if(res != VK_SUCCESS)
13066  {
13067  // Free all already created allocations.
13068  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13069  while(allocIndex--)
13070  {
13071  VmaAllocation_T* const alloc = pAllocations[allocIndex];
13072  const VkDeviceSize allocSize = alloc->GetSize();
13073  Free(alloc);
13074  m_hAllocator->m_Budget.RemoveAllocation(heapIndex, allocSize);
13075  }
13076  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
13077  }
13078 
13079  return res;
13080 }
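
// Illustrative sketch (hypothetical usage, not part of the original file): the
// public all-or-nothing counterpart of Allocate() above. If any page fails,
// the pages already created are freed and the whole call fails, mirroring the
// rollback loop in VmaBlockVector::Allocate(). `allocator` is assumed:
//
//     VkMemoryRequirements memReq = {}; // Typically from vkGetBufferMemoryRequirements.
//     memReq.size = 65536;
//     memReq.alignment = 256;
//     memReq.memoryTypeBits = UINT32_MAX;
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//
//     VmaAllocation allocs[8] = {};
//     VkResult res = vmaAllocateMemoryPages(
//         allocator, &memReq, &allocCreateInfo, 8, allocs, nullptr /* pAllocationInfo */);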
13081 
13082 VkResult VmaBlockVector::AllocatePage(
13083  uint32_t currentFrameIndex,
13084  VkDeviceSize size,
13085  VkDeviceSize alignment,
13086  const VmaAllocationCreateInfo& createInfo,
13087  VmaSuballocationType suballocType,
13088  VmaAllocation* pAllocation)
13089 {
13090  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13091  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
13092  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13093  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13094 
13095  VkDeviceSize freeMemory;
13096  {
13097  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13098  VmaBudget heapBudget = {};
13099  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13100  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
13101  }
13102 
13103  const bool canFallbackToDedicated = !IsCustomPool();
13104  const bool canCreateNewBlock =
13105  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
13106  (m_Blocks.size() < m_MaxBlockCount) &&
13107  (freeMemory >= size || !canFallbackToDedicated);
13108  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
13109 
13110  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
13111  // which in turn is available only when maxBlockCount = 1.
13112  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
13113  {
13114  canMakeOtherLost = false;
13115  }
13116 
13117  // Upper address can only be used with linear allocator and within single memory block.
13118  if(isUpperAddress &&
13119  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
13120  {
13121  return VK_ERROR_FEATURE_NOT_PRESENT;
13122  }
13123 
13124  // Validate strategy.
13125  switch(strategy)
13126  {
13127  case 0:
13128  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
13129  break;
13130  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
13131  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
13132  case VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET:
13133  break;
13134  default:
13135  return VK_ERROR_FEATURE_NOT_PRESENT;
13136  }
13137 
13138  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
13139  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
13140  {
13141  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13142  }
13143 
13144  /*
13145  Under certain conditions, this whole section can be skipped for optimization, so
13146  we move on directly to trying to allocate with canMakeOtherLost. That's the case
13147  e.g. for custom pools with linear algorithm.
13148  */
13149  if(!canMakeOtherLost || canCreateNewBlock)
13150  {
13151  // 1. Search existing allocations. Try to allocate without making other allocations lost.
13152  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
13153  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
13154 
13155  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13156  {
13157  // Use only last block.
13158  if(!m_Blocks.empty())
13159  {
13160  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
13161  VMA_ASSERT(pCurrBlock);
13162  VkResult res = AllocateFromBlock(
13163  pCurrBlock,
13164  currentFrameIndex,
13165  size,
13166  alignment,
13167  allocFlagsCopy,
13168  createInfo.pUserData,
13169  suballocType,
13170  strategy,
13171  pAllocation);
13172  if(res == VK_SUCCESS)
13173  {
13174  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
13175  return VK_SUCCESS;
13176  }
13177  }
13178  }
13179  else
13180  {
13181  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13182  {
13183  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13184  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13185  {
13186  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13187  VMA_ASSERT(pCurrBlock);
13188  VkResult res = AllocateFromBlock(
13189  pCurrBlock,
13190  currentFrameIndex,
13191  size,
13192  alignment,
13193  allocFlagsCopy,
13194  createInfo.pUserData,
13195  suballocType,
13196  strategy,
13197  pAllocation);
13198  if(res == VK_SUCCESS)
13199  {
13200  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
13201  return VK_SUCCESS;
13202  }
13203  }
13204  }
13205  else // WORST_FIT, FIRST_FIT
13206  {
13207  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13208  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13209  {
13210  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13211  VMA_ASSERT(pCurrBlock);
13212  VkResult res = AllocateFromBlock(
13213  pCurrBlock,
13214  currentFrameIndex,
13215  size,
13216  alignment,
13217  allocFlagsCopy,
13218  createInfo.pUserData,
13219  suballocType,
13220  strategy,
13221  pAllocation);
13222  if(res == VK_SUCCESS)
13223  {
13224  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
13225  return VK_SUCCESS;
13226  }
13227  }
13228  }
13229  }
13230 
13231  // 2. Try to create new block.
13232  if(canCreateNewBlock)
13233  {
13234  // Calculate optimal size for new block.
13235  VkDeviceSize newBlockSize = m_PreferredBlockSize;
13236  uint32_t newBlockSizeShift = 0;
13237  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
13238 
13239  if(!m_ExplicitBlockSize)
13240  {
13241  // Allocate 1/8, 1/4, 1/2 as first blocks.
13242  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
13243  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
13244  {
13245  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
13246  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
13247  {
13248  newBlockSize = smallerNewBlockSize;
13249  ++newBlockSizeShift;
13250  }
13251  else
13252  {
13253  break;
13254  }
13255  }
13256  }
13257 
13258  size_t newBlockIndex = 0;
13259  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
13260  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
13261  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
13262  if(!m_ExplicitBlockSize)
13263  {
13264  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
13265  {
13266  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
13267  if(smallerNewBlockSize >= size)
13268  {
13269  newBlockSize = smallerNewBlockSize;
13270  ++newBlockSizeShift;
13271  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
13272  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
13273  }
13274  else
13275  {
13276  break;
13277  }
13278  }
13279  }
13280 
13281  if(res == VK_SUCCESS)
13282  {
13283  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
13284  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
13285 
13286  res = AllocateFromBlock(
13287  pBlock,
13288  currentFrameIndex,
13289  size,
13290  alignment,
13291  allocFlagsCopy,
13292  createInfo.pUserData,
13293  suballocType,
13294  strategy,
13295  pAllocation);
13296  if(res == VK_SUCCESS)
13297  {
13298  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
13299  return VK_SUCCESS;
13300  }
13301  else
13302  {
13303  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
13304  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13305  }
13306  }
13307  }
13308  }
13309 
13310  // 3. Try to allocate from existing blocks, making other allocations lost.
13311  if(canMakeOtherLost)
13312  {
13313  uint32_t tryIndex = 0;
13314  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
13315  {
13316  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
13317  VmaAllocationRequest bestRequest = {};
13318  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
13319 
13320  // 1. Search existing allocations.
13321  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
13322  {
13323  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
13324  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
13325  {
13326  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13327  VMA_ASSERT(pCurrBlock);
13328  VmaAllocationRequest currRequest = {};
13329  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13330  currentFrameIndex,
13331  m_FrameInUseCount,
13332  m_BufferImageGranularity,
13333  size,
13334  alignment,
13335  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13336  suballocType,
13337  canMakeOtherLost,
13338  strategy,
13339  &currRequest))
13340  {
13341  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13342  if(pBestRequestBlock == VMA_NULL ||
13343  currRequestCost < bestRequestCost)
13344  {
13345  pBestRequestBlock = pCurrBlock;
13346  bestRequest = currRequest;
13347  bestRequestCost = currRequestCost;
13348 
13349  if(bestRequestCost == 0)
13350  {
13351  break;
13352  }
13353  }
13354  }
13355  }
13356  }
13357  else // WORST_FIT, FIRST_FIT
13358  {
13359  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13360  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13361  {
13362  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13363  VMA_ASSERT(pCurrBlock);
13364  VmaAllocationRequest currRequest = {};
13365  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13366  currentFrameIndex,
13367  m_FrameInUseCount,
13368  m_BufferImageGranularity,
13369  size,
13370  alignment,
13371  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13372  suballocType,
13373  canMakeOtherLost,
13374  strategy,
13375  &currRequest))
13376  {
13377  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13378  if(pBestRequestBlock == VMA_NULL ||
13379  currRequestCost < bestRequestCost ||
13380  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13381  {
13382  pBestRequestBlock = pCurrBlock;
13383  bestRequest = currRequest;
13384  bestRequestCost = currRequestCost;
13385 
13386  if(bestRequestCost == 0 ||
13387  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13388  {
13389  break;
13390  }
13391  }
13392  }
13393  }
13394  }
13395 
13396  if(pBestRequestBlock != VMA_NULL)
13397  {
13398  if(mapped)
13399  {
13400  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
13401  if(res != VK_SUCCESS)
13402  {
13403  return res;
13404  }
13405  }
13406 
13407  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
13408  currentFrameIndex,
13409  m_FrameInUseCount,
13410  &bestRequest))
13411  {
13412  // Allocate from this pBlock.
13413  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13414  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
13415  UpdateHasEmptyBlock();
13416  (*pAllocation)->InitBlockAllocation(
13417  pBestRequestBlock,
13418  bestRequest.offset,
13419  alignment,
13420  size,
13421  m_MemoryTypeIndex,
13422  suballocType,
13423  mapped,
13424  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13425  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
13426  VMA_DEBUG_LOG(" Returned from existing block");
13427  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
13428  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13429  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13430  {
13431  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13432  }
13433  if(IsCorruptionDetectionEnabled())
13434  {
13435  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
13436  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13437  }
13438  return VK_SUCCESS;
13439  }
13440  // else: Some allocations must have been touched while we are here. Next try.
13441  }
13442  else
13443  {
13444  // Could not find place in any of the blocks - break outer loop.
13445  break;
13446  }
13447  }
13448  /* Maximum number of tries exceeded - a very unlikely event when many other
13449  threads are simultaneously touching allocations, making it impossible to mark
13450  them lost at the same time as we try to allocate. */
13451  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
13452  {
13453  return VK_ERROR_TOO_MANY_OBJECTS;
13454  }
13455  }
13456 
13457  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13458 }
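
// Illustrative sketch (standalone, not part of the original file): the
// "1/8, 1/4, 1/2" first-block heuristic from AllocatePage() above, written as
// a standalone function. With the default 256 MiB preferred block size, the
// very first 1 MiB allocation lands in a 32 MiB block:
//
//     #include <cstdint>
//
//     static uint64_t FirstBlockSize(uint64_t preferredBlockSize,
//         uint64_t maxExistingBlockSize, uint64_t allocSize)
//     {
//         uint64_t newBlockSize = preferredBlockSize;
//         for(uint32_t i = 0; i < 3; ++i) // NEW_BLOCK_SIZE_SHIFT_MAX
//         {
//             const uint64_t smaller = newBlockSize / 2;
//             if(smaller > maxExistingBlockSize && smaller >= allocSize * 2)
//                 newBlockSize = smaller;
//             else
//                 break;
//         }
//         return newBlockSize;
//     }
//
//     // FirstBlockSize(256 MiB, 0, 1 MiB) == 32 MiB (three halvings).
//     // FirstBlockSize(256 MiB, 0, 100 MiB) == 256 MiB (halving would not fit 2x the size).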
13459 
13460 void VmaBlockVector::Free(
13461  const VmaAllocation hAllocation)
13462 {
13463  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
13464 
13465  bool budgetExceeded = false;
13466  {
13467  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13468  VmaBudget heapBudget = {};
13469  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13470  budgetExceeded = heapBudget.usage >= heapBudget.budget;
13471  }
13472 
13473  // Scope for lock.
13474  {
13475  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13476 
13477  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13478 
13479  if(IsCorruptionDetectionEnabled())
13480  {
13481  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
13482  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
13483  }
13484 
13485  if(hAllocation->IsPersistentMap())
13486  {
13487  pBlock->Unmap(m_hAllocator, 1);
13488  }
13489 
13490  pBlock->m_pMetadata->Free(hAllocation);
13491  VMA_HEAVY_ASSERT(pBlock->Validate());
13492 
13493  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
13494 
13495  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
13496  // pBlock became empty after this deallocation.
13497  if(pBlock->m_pMetadata->IsEmpty())
13498  {
13499  // Already has empty block. We don't want to have two, so delete this one.
13500  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
13501  {
13502  pBlockToDelete = pBlock;
13503  Remove(pBlock);
13504  }
13505  // else: We now have an empty block - leave it.
13506  }
13507  // pBlock didn't become empty, but we have another empty block - find and free that one.
13508  // (This is optional, heuristics.)
13509  else if(m_HasEmptyBlock && canDeleteBlock)
13510  {
13511  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
13512  if(pLastBlock->m_pMetadata->IsEmpty())
13513  {
13514  pBlockToDelete = pLastBlock;
13515  m_Blocks.pop_back();
13516  }
13517  }
13518 
13519  UpdateHasEmptyBlock();
13520  IncrementallySortBlocks();
13521  }
13522 
13523  // Destruction of a free block. Deferred until this point, outside of mutex
13524  // lock, for performance reasons.
13525  if(pBlockToDelete != VMA_NULL)
13526  {
13527  VMA_DEBUG_LOG(" Deleted empty block");
13528  pBlockToDelete->Destroy(m_hAllocator);
13529  vma_delete(m_hAllocator, pBlockToDelete);
13530  }
13531 }
13532 
13533 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
13534 {
13535  VkDeviceSize result = 0;
13536  for(size_t i = m_Blocks.size(); i--; )
13537  {
13538  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
13539  if(result >= m_PreferredBlockSize)
13540  {
13541  break;
13542  }
13543  }
13544  return result;
13545 }
13546 
13547 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13548 {
13549  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13550  {
13551  if(m_Blocks[blockIndex] == pBlock)
13552  {
13553  VmaVectorRemove(m_Blocks, blockIndex);
13554  return;
13555  }
13556  }
13557  VMA_ASSERT(0);
13558 }
13559 
13560 void VmaBlockVector::IncrementallySortBlocks()
13561 {
13562  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13563  {
13564  // Bubble sort only until first swap.
13565  for(size_t i = 1; i < m_Blocks.size(); ++i)
13566  {
13567  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13568  {
13569  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13570  return;
13571  }
13572  }
13573  }
13574 }
13575 
13576 VkResult VmaBlockVector::AllocateFromBlock(
13577  VmaDeviceMemoryBlock* pBlock,
13578  uint32_t currentFrameIndex,
13579  VkDeviceSize size,
13580  VkDeviceSize alignment,
13581  VmaAllocationCreateFlags allocFlags,
13582  void* pUserData,
13583  VmaSuballocationType suballocType,
13584  uint32_t strategy,
13585  VmaAllocation* pAllocation)
13586 {
13587  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13588  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13589  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13590  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13591 
13592  VmaAllocationRequest currRequest = {};
13593  if(pBlock->m_pMetadata->CreateAllocationRequest(
13594  currentFrameIndex,
13595  m_FrameInUseCount,
13596  m_BufferImageGranularity,
13597  size,
13598  alignment,
13599  isUpperAddress,
13600  suballocType,
13601  false, // canMakeOtherLost
13602  strategy,
13603  &currRequest))
13604  {
13605  // Allocate from pBlock.
13606  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13607 
13608  if(mapped)
13609  {
13610  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13611  if(res != VK_SUCCESS)
13612  {
13613  return res;
13614  }
13615  }
13616 
13617  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13618  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13619  UpdateHasEmptyBlock();
13620  (*pAllocation)->InitBlockAllocation(
13621  pBlock,
13622  currRequest.offset,
13623  alignment,
13624  size,
13625  m_MemoryTypeIndex,
13626  suballocType,
13627  mapped,
13628  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13629  VMA_HEAVY_ASSERT(pBlock->Validate());
13630  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13631  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13632  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13633  {
13634  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13635  }
13636  if(IsCorruptionDetectionEnabled())
13637  {
13638  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13639  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13640  }
13641  return VK_SUCCESS;
13642  }
13643  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13644 }
13645 
13646 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13647 {
13648  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13649  allocInfo.pNext = m_pMemoryAllocateNext;
13650  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13651  allocInfo.allocationSize = blockSize;
13652 
13653 #if VMA_BUFFER_DEVICE_ADDRESS
13654  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13655  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13656  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13657  {
13658  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13659  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13660  }
13661 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13662 
13663 #if VMA_MEMORY_PRIORITY
13664  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
13665  if(m_hAllocator->m_UseExtMemoryPriority)
13666  {
13667  priorityInfo.priority = m_Priority;
13668  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
13669  }
13670 #endif // #if VMA_MEMORY_PRIORITY
13671 
13672  VkDeviceMemory mem = VK_NULL_HANDLE;
13673  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13674  if(res < 0)
13675  {
13676  return res;
13677  }
13678 
13679  // New VkDeviceMemory successfully created.
13680 
13681  // Create new Allocation for it.
13682  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13683  pBlock->Init(
13684  m_hAllocator,
13685  m_hParentPool,
13686  m_MemoryTypeIndex,
13687  mem,
13688  allocInfo.allocationSize,
13689  m_NextBlockId++,
13690  m_Algorithm);
13691 
13692  m_Blocks.push_back(pBlock);
13693  if(pNewBlockIndex != VMA_NULL)
13694  {
13695  *pNewBlockIndex = m_Blocks.size() - 1;
13696  }
13697 
13698  return VK_SUCCESS;
13699 }
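
// Illustrative sketch (hypothetical usage, not part of the original file): for
// CreateBlock() above to chain VkMemoryAllocateFlagsInfo into its allocations,
// the allocator must be created with the corresponding flag, and the
// application must have enabled the bufferDeviceAddress device feature.
// `physicalDevice`, `device`, and `instance` are assumed:
//
//     VmaAllocatorCreateInfo allocatorInfo = {};
//     allocatorInfo.physicalDevice = physicalDevice;
//     allocatorInfo.device = device;
//     allocatorInfo.instance = instance;
//     allocatorInfo.flags = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT;
//
//     VmaAllocator allocator;
//     VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);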
13700 
13701 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13702  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13703  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13704 {
13705  const size_t blockCount = m_Blocks.size();
13706  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13707 
13708  enum BLOCK_FLAG
13709  {
13710  BLOCK_FLAG_USED = 0x00000001,
13711  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13712  };
13713 
13714  struct BlockInfo
13715  {
13716  uint32_t flags;
13717  void* pMappedData;
13718  };
13719  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13720  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13721  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13722 
13723  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13724  const size_t moveCount = moves.size();
13725  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13726  {
13727  const VmaDefragmentationMove& move = moves[moveIndex];
13728  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13729  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13730  }
13731 
13732  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13733 
13734  // Go over all blocks. Get mapped pointer or map if necessary.
13735  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13736  {
13737  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13738  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13739  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13740  {
13741  currBlockInfo.pMappedData = pBlock->GetMappedData();
13742  // It is not originally mapped - map it.
13743  if(currBlockInfo.pMappedData == VMA_NULL)
13744  {
13745  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13746  if(pDefragCtx->res == VK_SUCCESS)
13747  {
13748  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13749  }
13750  }
13751  }
13752  }
13753 
13754  // Go over all moves. Do actual data transfer.
13755  if(pDefragCtx->res == VK_SUCCESS)
13756  {
13757  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13758  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13759 
13760  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13761  {
13762  const VmaDefragmentationMove& move = moves[moveIndex];
13763 
13764  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13765  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13766 
13767  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13768 
13769  // Invalidate source.
13770  if(isNonCoherent)
13771  {
13772  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13773  memRange.memory = pSrcBlock->GetDeviceMemory();
13774  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13775  memRange.size = VMA_MIN(
13776  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13777  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13778  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13779  }
13780 
13781  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13782  memmove(
13783  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13784  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13785  static_cast<size_t>(move.size));
13786 
13787  if(IsCorruptionDetectionEnabled())
13788  {
13789  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13790  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13791  }
13792 
13793  // Flush destination.
13794  if(isNonCoherent)
13795  {
13796  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13797  memRange.memory = pDstBlock->GetDeviceMemory();
13798  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13799  memRange.size = VMA_MIN(
13800  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13801  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13802  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13803  }
13804  }
13805  }
13806 
13807  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
13808  // This is done regardless of whether pDefragCtx->res == VK_SUCCESS.
13809  for(size_t blockIndex = blockCount; blockIndex--; )
13810  {
13811  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13812  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13813  {
13814  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13815  pBlock->Unmap(m_hAllocator, 1);
13816  }
13817  }
13818 }
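
/*
Worked example of the non-coherent range math above, with illustrative values:
nonCoherentAtomSize = 256, move.size = 100, move.srcOffset = 300.

    memRange.offset = VmaAlignDown(300, 256) = 256
    memRange.size   = VmaAlignUp(100 + (300 - 256), 256) = VmaAlignUp(144, 256) = 256

The size is then clamped to the end of the block. This satisfies the Vulkan
requirement that ranges passed to vkFlushMappedMemoryRanges /
vkInvalidateMappedMemoryRanges start and end on multiples of
nonCoherentAtomSize (or reach the end of the allocation).
*/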
13819 
13820 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13821  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13822  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13823  VkCommandBuffer commandBuffer)
13824 {
13825  const size_t blockCount = m_Blocks.size();
13826 
13827  pDefragCtx->blockContexts.resize(blockCount);
13828  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13829 
13830  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13831  const size_t moveCount = moves.size();
13832  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13833  {
13834  const VmaDefragmentationMove& move = moves[moveIndex];
13835 
13836  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13837  {
13838  // Old-school moves still require us to map the whole block.
13839  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13840  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13841  }
13842  }
13843 
13844  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13845 
13846  // Go over all blocks. Create and bind buffer for whole block if necessary.
13847  {
13848  VkBufferCreateInfo bufCreateInfo;
13849  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13850 
13851  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13852  {
13853  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13854  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13855  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13856  {
13857  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13858  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13859  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13860  if(pDefragCtx->res == VK_SUCCESS)
13861  {
13862  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13863  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13864  }
13865  }
13866  }
13867  }
13868 
13869  // Go over all moves. Post data transfer commands to command buffer.
13870  if(pDefragCtx->res == VK_SUCCESS)
13871  {
13872  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13873  {
13874  const VmaDefragmentationMove& move = moves[moveIndex];
13875 
13876  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13877  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13878 
13879  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13880 
13881  VkBufferCopy region = {
13882  move.srcOffset,
13883  move.dstOffset,
13884  move.size };
13885  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13886  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13887  }
13888  }
13889 
13890  // Save buffers to defrag context for later destruction.
13891  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13892  {
13893  pDefragCtx->res = VK_NOT_READY;
13894  }
13895 }
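
/*
ApplyDefragmentationMovesGpu only records vkCmdCopyBuffer commands; executing
them is the caller's job. A minimal sketch of the expected follow-up, assuming
a command buffer that the caller had begun before starting defragmentation
(cmdBuf, queue, fence and device are hypothetical names from the calling code):

    vkEndCommandBuffer(cmdBuf);
    VkSubmitInfo submitInfo = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &cmdBuf;
    vkQueueSubmit(queue, 1, &submitInfo, fence);
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
    // Only after the copies have completed is it safe to end defragmentation.
*/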
13896 
13897 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13898 {
13899  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13900  {
13901  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13902  if(pBlock->m_pMetadata->IsEmpty())
13903  {
13904  if(m_Blocks.size() > m_MinBlockCount)
13905  {
13906  if(pDefragmentationStats != VMA_NULL)
13907  {
13908  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13909  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13910  }
13911 
13912  VmaVectorRemove(m_Blocks, blockIndex);
13913  pBlock->Destroy(m_hAllocator);
13914  vma_delete(m_hAllocator, pBlock);
13915  }
13916  else
13917  {
13918  break;
13919  }
13920  }
13921  }
13922  UpdateHasEmptyBlock();
13923 }
13924 
13925 void VmaBlockVector::UpdateHasEmptyBlock()
13926 {
13927  m_HasEmptyBlock = false;
13928  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13929  {
13930  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13931  if(pBlock->m_pMetadata->IsEmpty())
13932  {
13933  m_HasEmptyBlock = true;
13934  break;
13935  }
13936  }
13937 }
13938 
13939 #if VMA_STATS_STRING_ENABLED
13940 
13941 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13942 {
13943  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13944 
13945  json.BeginObject();
13946 
13947  if(IsCustomPool())
13948  {
13949  const char* poolName = m_hParentPool->GetName();
13950  if(poolName != VMA_NULL && poolName[0] != '\0')
13951  {
13952  json.WriteString("Name");
13953  json.WriteString(poolName);
13954  }
13955 
13956  json.WriteString("MemoryTypeIndex");
13957  json.WriteNumber(m_MemoryTypeIndex);
13958 
13959  json.WriteString("BlockSize");
13960  json.WriteNumber(m_PreferredBlockSize);
13961 
13962  json.WriteString("BlockCount");
13963  json.BeginObject(true);
13964  if(m_MinBlockCount > 0)
13965  {
13966  json.WriteString("Min");
13967  json.WriteNumber((uint64_t)m_MinBlockCount);
13968  }
13969  if(m_MaxBlockCount < SIZE_MAX)
13970  {
13971  json.WriteString("Max");
13972  json.WriteNumber((uint64_t)m_MaxBlockCount);
13973  }
13974  json.WriteString("Cur");
13975  json.WriteNumber((uint64_t)m_Blocks.size());
13976  json.EndObject();
13977 
13978  if(m_FrameInUseCount > 0)
13979  {
13980  json.WriteString("FrameInUseCount");
13981  json.WriteNumber(m_FrameInUseCount);
13982  }
13983 
13984  if(m_Algorithm != 0)
13985  {
13986  json.WriteString("Algorithm");
13987  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13988  }
13989  }
13990  else
13991  {
13992  json.WriteString("PreferredBlockSize");
13993  json.WriteNumber(m_PreferredBlockSize);
13994  }
13995 
13996  json.WriteString("Blocks");
13997  json.BeginObject();
13998  for(size_t i = 0; i < m_Blocks.size(); ++i)
13999  {
14000  json.BeginString();
14001  json.ContinueString(m_Blocks[i]->GetId());
14002  json.EndString();
14003 
14004  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
14005  }
14006  json.EndObject();
14007 
14008  json.EndObject();
14009 }
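
/*
For a custom pool, PrintDetailedMap above emits JSON shaped roughly like the
following (values are illustrative; the contents of each entry under "Blocks"
come from the per-block VmaBlockMetadata::PrintDetailedMap):

    {
        "Name": "MyPoolName",
        "MemoryTypeIndex": 2,
        "BlockSize": 268435456,
        "BlockCount": { "Min": 1, "Cur": 3 },
        "Blocks": {
            "0": { ... },
            "1": { ... }
        }
    }
*/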
14010 
14011 #endif // #if VMA_STATS_STRING_ENABLED
14012 
14013 void VmaBlockVector::Defragment(
14014  class VmaBlockVectorDefragmentationContext* pCtx,
14015  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
14016  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
14017  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
14018  VkCommandBuffer commandBuffer)
14019 {
14020  pCtx->res = VK_SUCCESS;
14021 
14022  const VkMemoryPropertyFlags memPropFlags =
14023  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
14024  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
14025 
14026  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
14027  isHostVisible;
14028  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
14029  !IsCorruptionDetectionEnabled() &&
14030  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
14031 
14032  // There are options to defragment this memory type.
14033  if(canDefragmentOnCpu || canDefragmentOnGpu)
14034  {
14035  bool defragmentOnGpu;
14036  // There is only one option to defragment this memory type.
14037  if(canDefragmentOnGpu != canDefragmentOnCpu)
14038  {
14039  defragmentOnGpu = canDefragmentOnGpu;
14040  }
14041  // Both options are available: use heuristics to choose the better one.
14042  else
14043  {
14044  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
14045  m_hAllocator->IsIntegratedGpu();
14046  }
14047 
14048  bool overlappingMoveSupported = !defragmentOnGpu;
14049 
14050  if(m_hAllocator->m_UseMutex)
14051  {
14052  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14053  {
14054  if(!m_Mutex.TryLockWrite())
14055  {
14056  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
14057  return;
14058  }
14059  }
14060  else
14061  {
14062  m_Mutex.LockWrite();
14063  pCtx->mutexLocked = true;
14064  }
14065  }
14066 
14067  pCtx->Begin(overlappingMoveSupported, flags);
14068 
14069  // Defragment.
14070 
14071  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
14072  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
14073  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
14074 
14075  // Accumulate statistics.
14076  if(pStats != VMA_NULL)
14077  {
14078  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
14079  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
14080  pStats->bytesMoved += bytesMoved;
14081  pStats->allocationsMoved += allocationsMoved;
14082  VMA_ASSERT(bytesMoved <= maxBytesToMove);
14083  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
14084  if(defragmentOnGpu)
14085  {
14086  maxGpuBytesToMove -= bytesMoved;
14087  maxGpuAllocationsToMove -= allocationsMoved;
14088  }
14089  else
14090  {
14091  maxCpuBytesToMove -= bytesMoved;
14092  maxCpuAllocationsToMove -= allocationsMoved;
14093  }
14094  }
14095 
14096  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14097  {
14098  if(m_hAllocator->m_UseMutex)
14099  m_Mutex.UnlockWrite();
14100 
14101  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
14102  pCtx->res = VK_NOT_READY;
14103 
14104  return;
14105  }
14106 
14107  if(pCtx->res >= VK_SUCCESS)
14108  {
14109  if(defragmentOnGpu)
14110  {
14111  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
14112  }
14113  else
14114  {
14115  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
14116  }
14117  }
14118  }
14119 }
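
/*
VmaBlockVector::Defragment above is driven by the public defragmentation API.
A minimal sketch of CPU-only use of that API, assuming allocator, allocations
and allocationCount exist in the calling code:

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
    // commandBuffer is left VK_NULL_HANDLE, so GPU-side moves are disabled
    // (VmaDefragmentationContext_T::Defragment zeroes the GPU limits then).

    VmaDefragmentationStats stats = {};
    VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
    VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
    if(res >= VK_SUCCESS)
    {
        vmaDefragmentationEnd(allocator, defragCtx);
    }
*/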
14120 
14121 void VmaBlockVector::DefragmentationEnd(
14122  class VmaBlockVectorDefragmentationContext* pCtx,
14123  uint32_t flags,
14124  VmaDefragmentationStats* pStats)
14125 {
14126  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
14127  {
14128  VMA_ASSERT(pCtx->mutexLocked == false);
14129 
14130  // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any
14131  // lock protecting us. Since we mutate state here, we have to take the lock now.
14132  m_Mutex.LockWrite();
14133  pCtx->mutexLocked = true;
14134  }
14135 
14136  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
14137  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
14138  {
14139  // Destroy buffers.
14140  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
14141  {
14142  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
14143  if(blockCtx.hBuffer)
14144  {
14145  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
14146  }
14147  }
14148 
14149  if(pCtx->res >= VK_SUCCESS)
14150  {
14151  FreeEmptyBlocks(pStats);
14152  }
14153  }
14154 
14155  if(pCtx->mutexLocked)
14156  {
14157  VMA_ASSERT(m_hAllocator->m_UseMutex);
14158  m_Mutex.UnlockWrite();
14159  }
14160 }
14161 
14162 uint32_t VmaBlockVector::ProcessDefragmentations(
14163  class VmaBlockVectorDefragmentationContext *pCtx,
14164  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
14165 {
14166  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14167 
14168  const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
14169 
14170  for(uint32_t i = 0; i < moveCount; ++ i)
14171  {
14172  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
14173 
14174  pMove->allocation = move.hAllocation;
14175  pMove->memory = move.pDstBlock->GetDeviceMemory();
14176  pMove->offset = move.dstOffset;
14177 
14178  ++ pMove;
14179  }
14180 
14181  pCtx->defragmentationMovesProcessed += moveCount;
14182 
14183  return moveCount;
14184 }
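
/*
Each VmaDefragmentationPassMoveInfo filled in above tells the caller where an
allocation's data must live after the pass: (pMove->memory, pMove->offset).
A hedged sketch of consuming one pass worth of moves; recreateBufferAt is a
hypothetical application helper that recreates or rebinds the resource at its
new location and copies the old contents over:

    for(uint32_t i = 0; i < passInfo.moveCount; ++i)
    {
        const VmaDefragmentationPassMoveInfo& m = passInfo.pMoves[i];
        recreateBufferAt(m.allocation, m.memory, m.offset);
    }
*/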
14185 
14186 void VmaBlockVector::CommitDefragmentations(
14187  class VmaBlockVectorDefragmentationContext *pCtx,
14188  VmaDefragmentationStats* pStats)
14189 {
14190  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14191 
14192  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
14193  {
14194  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
14195 
14196  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
14197  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
14198  }
14199 
14200  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
14201  FreeEmptyBlocks(pStats);
14202 }
14203 
14204 size_t VmaBlockVector::CalcAllocationCount() const
14205 {
14206  size_t result = 0;
14207  for(size_t i = 0; i < m_Blocks.size(); ++i)
14208  {
14209  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
14210  }
14211  return result;
14212 }
14213 
14214 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
14215 {
14216  if(m_BufferImageGranularity == 1)
14217  {
14218  return false;
14219  }
14220  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
14221  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
14222  {
14223  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
14224  VMA_ASSERT(m_Algorithm == 0);
14225  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
14226  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
14227  {
14228  return true;
14229  }
14230  }
14231  return false;
14232 }
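
/*
Illustrative example of the conflict checked above: with
bufferImageGranularity = 1024, a buffer ending at offset 900 and an optimally
tiled image placed at offset 960 would share the 1024-byte "page" [0, 1024).
The Vulkan spec forbids a linear and a non-linear resource from sharing such a
page, so the metadata is asked whether two adjacent suballocations of
conflicting types could end up in that situation.
*/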
14233 
14234 void VmaBlockVector::MakePoolAllocationsLost(
14235  uint32_t currentFrameIndex,
14236  size_t* pLostAllocationCount)
14237 {
14238  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
14239  size_t lostAllocationCount = 0;
14240  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14241  {
14242  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14243  VMA_ASSERT(pBlock);
14244  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
14245  }
14246  if(pLostAllocationCount != VMA_NULL)
14247  {
14248  *pLostAllocationCount = lostAllocationCount;
14249  }
14250 }
14251 
14252 VkResult VmaBlockVector::CheckCorruption()
14253 {
14254  if(!IsCorruptionDetectionEnabled())
14255  {
14256  return VK_ERROR_FEATURE_NOT_PRESENT;
14257  }
14258 
14259  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
14260  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14261  {
14262  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14263  VMA_ASSERT(pBlock);
14264  VkResult res = pBlock->CheckCorruption(m_hAllocator);
14265  if(res != VK_SUCCESS)
14266  {
14267  return res;
14268  }
14269  }
14270  return VK_SUCCESS;
14271 }
14272 
14273 void VmaBlockVector::AddStats(VmaStats* pStats)
14274 {
14275  const uint32_t memTypeIndex = m_MemoryTypeIndex;
14276  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
14277 
14278  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
14279 
14280  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
14281  {
14282  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
14283  VMA_ASSERT(pBlock);
14284  VMA_HEAVY_ASSERT(pBlock->Validate());
14285  VmaStatInfo allocationStatInfo;
14286  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
14287  VmaAddStatInfo(pStats->total, allocationStatInfo);
14288  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
14289  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
14290  }
14291 }
14292 
14293 ////////////////////////////////////////////////////////////////////////////////
14294 // VmaDefragmentationAlgorithm_Generic members definition
14295 
14296 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
14297  VmaAllocator hAllocator,
14298  VmaBlockVector* pBlockVector,
14299  uint32_t currentFrameIndex,
14300  bool overlappingMoveSupported) :
14301  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14302  m_AllocationCount(0),
14303  m_AllAllocations(false),
14304  m_BytesMoved(0),
14305  m_AllocationsMoved(0),
14306  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
14307 {
14308  // Create block info for each block.
14309  const size_t blockCount = m_pBlockVector->m_Blocks.size();
14310  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14311  {
14312  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
14313  pBlockInfo->m_OriginalBlockIndex = blockIndex;
14314  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
14315  m_Blocks.push_back(pBlockInfo);
14316  }
14317 
14318  // Sort them by m_pBlock pointer value.
14319  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
14320 }
14321 
14322 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
14323 {
14324  for(size_t i = m_Blocks.size(); i--; )
14325  {
14326  vma_delete(m_hAllocator, m_Blocks[i]);
14327  }
14328 }
14329 
14330 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14331 {
14332  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
14333  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
14334  {
14335  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
14336  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
14337  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
14338  {
14339  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
14340  (*it)->m_Allocations.push_back(allocInfo);
14341  }
14342  else
14343  {
14344  VMA_ASSERT(0);
14345  }
14346 
14347  ++m_AllocationCount;
14348  }
14349 }
14350 
14351 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
14352  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14353  VkDeviceSize maxBytesToMove,
14354  uint32_t maxAllocationsToMove,
14355  bool freeOldAllocations)
14356 {
14357  if(m_Blocks.empty())
14358  {
14359  return VK_SUCCESS;
14360  }
14361 
14362  // This is a choice based on research.
14363  // Option 1:
14364  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
14365  // Option 2:
14366  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
14367  // Option 3:
14368  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
14369 
14370  size_t srcBlockMinIndex = 0;
14371  // With FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
14372  /*
14373  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
14374  {
14375  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
14376  if(blocksWithNonMovableCount > 0)
14377  {
14378  srcBlockMinIndex = blocksWithNonMovableCount - 1;
14379  }
14380  }
14381  */
14382 
14383  size_t srcBlockIndex = m_Blocks.size() - 1;
14384  size_t srcAllocIndex = SIZE_MAX;
14385  for(;;)
14386  {
14387  // 1. Find next allocation to move.
14388  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
14389  // 1.2. Then start from last to first m_Allocations.
14390  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
14391  {
14392  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
14393  {
14394  // Finished: no more allocations to process.
14395  if(srcBlockIndex == srcBlockMinIndex)
14396  {
14397  return VK_SUCCESS;
14398  }
14399  else
14400  {
14401  --srcBlockIndex;
14402  srcAllocIndex = SIZE_MAX;
14403  }
14404  }
14405  else
14406  {
14407  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
14408  }
14409  }
14410 
14411  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
14412  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
14413 
14414  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
14415  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
14416  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
14417  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
14418 
14419  // 2. Try to find new place for this allocation in preceding or current block.
14420  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
14421  {
14422  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
14423  VmaAllocationRequest dstAllocRequest;
14424  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
14425  m_CurrentFrameIndex,
14426  m_pBlockVector->GetFrameInUseCount(),
14427  m_pBlockVector->GetBufferImageGranularity(),
14428  size,
14429  alignment,
14430  false, // upperAddress
14431  suballocType,
14432  false, // canMakeOtherLost
14433  strategy,
14434  &dstAllocRequest) &&
14435  MoveMakesSense(
14436  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
14437  {
14438  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
14439 
14440  // Reached limit on number of allocations or bytes to move.
14441  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
14442  (m_BytesMoved + size > maxBytesToMove))
14443  {
14444  return VK_SUCCESS;
14445  }
14446 
14447  VmaDefragmentationMove move = {};
14448  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
14449  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
14450  move.srcOffset = srcOffset;
14451  move.dstOffset = dstAllocRequest.offset;
14452  move.size = size;
14453  move.hAllocation = allocInfo.m_hAllocation;
14454  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
14455  move.pDstBlock = pDstBlockInfo->m_pBlock;
14456 
14457  moves.push_back(move);
14458 
14459  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
14460  dstAllocRequest,
14461  suballocType,
14462  size,
14463  allocInfo.m_hAllocation);
14464 
14465  if(freeOldAllocations)
14466  {
14467  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
14468  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
14469  }
14470 
14471  if(allocInfo.m_pChanged != VMA_NULL)
14472  {
14473  *allocInfo.m_pChanged = VK_TRUE;
14474  }
14475 
14476  ++m_AllocationsMoved;
14477  m_BytesMoved += size;
14478 
14479  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
14480 
14481  break;
14482  }
14483  }
14484 
14485  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
14486 
14487  if(srcAllocIndex > 0)
14488  {
14489  --srcAllocIndex;
14490  }
14491  else
14492  {
14493  if(srcBlockIndex > 0)
14494  {
14495  --srcBlockIndex;
14496  srcAllocIndex = SIZE_MAX;
14497  }
14498  else
14499  {
14500  return VK_SUCCESS;
14501  }
14502  }
14503  }
14504 }
14505 
14506 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
14507 {
14508  size_t result = 0;
14509  for(size_t i = 0; i < m_Blocks.size(); ++i)
14510  {
14511  if(m_Blocks[i]->m_HasNonMovableAllocations)
14512  {
14513  ++result;
14514  }
14515  }
14516  return result;
14517 }
14518 
14519 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
14520  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14521  VkDeviceSize maxBytesToMove,
14522  uint32_t maxAllocationsToMove,
14523  VmaDefragmentationFlags flags)
14524 {
14525  if(!m_AllAllocations && m_AllocationCount == 0)
14526  {
14527  return VK_SUCCESS;
14528  }
14529 
14530  const size_t blockCount = m_Blocks.size();
14531  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14532  {
14533  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
14534 
14535  if(m_AllAllocations)
14536  {
14537  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
14538  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
14539  it != pMetadata->m_Suballocations.end();
14540  ++it)
14541  {
14542  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
14543  {
14544  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
14545  pBlockInfo->m_Allocations.push_back(allocInfo);
14546  }
14547  }
14548  }
14549 
14550  pBlockInfo->CalcHasNonMovableAllocations();
14551 
14552  // This is a choice based on research.
14553  // Option 1:
14554  pBlockInfo->SortAllocationsByOffsetDescending();
14555  // Option 2:
14556  //pBlockInfo->SortAllocationsBySizeDescending();
14557  }
14558 
14559  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
14560  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14561 
14562  // This is a choice based on research.
14563  const uint32_t roundCount = 2;
14564 
14565  // Execute defragmentation rounds (the main part).
14566  VkResult result = VK_SUCCESS;
14567  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14568  {
14569  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14570  }
14571 
14572  return result;
14573 }
14574 
14575 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14576  size_t dstBlockIndex, VkDeviceSize dstOffset,
14577  size_t srcBlockIndex, VkDeviceSize srcOffset)
14578 {
14579  if(dstBlockIndex < srcBlockIndex)
14580  {
14581  return true;
14582  }
14583  if(dstBlockIndex > srcBlockIndex)
14584  {
14585  return false;
14586  }
14587  if(dstOffset < srcOffset)
14588  {
14589  return true;
14590  }
14591  return false;
14592 }
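
/*
In effect MoveMakesSense is a lexicographic "move left or down only" test on
(blockIndex, offset). For example, moving from (block 2, offset 0) to
(block 1, offset 4096) makes sense, as does moving from (block 1, offset 4096)
to (block 1, offset 0); the reverse directions do not, which prevents moves
that would merely shuffle data toward the "source" end.
*/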
14593 
14594 ////////////////////////////////////////////////////////////////////////////////
14595 // VmaDefragmentationAlgorithm_Fast
14596 
14597 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14598  VmaAllocator hAllocator,
14599  VmaBlockVector* pBlockVector,
14600  uint32_t currentFrameIndex,
14601  bool overlappingMoveSupported) :
14602  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14603  m_OverlappingMoveSupported(overlappingMoveSupported),
14604  m_AllocationCount(0),
14605  m_AllAllocations(false),
14606  m_BytesMoved(0),
14607  m_AllocationsMoved(0),
14608  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14609 {
14610  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14611 
14612 }
14613 
14614 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14615 {
14616 }
14617 
14618 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14619  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14620  VkDeviceSize maxBytesToMove,
14621  uint32_t maxAllocationsToMove,
14622  VmaDefragmentationFlags flags)
14623 {
14624  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14625 
14626  const size_t blockCount = m_pBlockVector->GetBlockCount();
14627  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14628  {
14629  return VK_SUCCESS;
14630  }
14631 
14632  PreprocessMetadata();
14633 
14634  // Sort blocks in order from most "destination" (least free space) to most "source" (most free space).
14635 
14636  m_BlockInfos.resize(blockCount);
14637  for(size_t i = 0; i < blockCount; ++i)
14638  {
14639  m_BlockInfos[i].origBlockIndex = i;
14640  }
14641 
14642  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14643  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14644  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14645  });
14646 
14647  // THE MAIN ALGORITHM
14648 
14649  FreeSpaceDatabase freeSpaceDb;
14650 
14651  size_t dstBlockInfoIndex = 0;
14652  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14653  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14654  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14655  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14656  VkDeviceSize dstOffset = 0;
14657 
14658  bool end = false;
14659  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14660  {
14661  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14662  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14663  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14664  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14665  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14666  {
14667  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14668  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14669  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14670  if(m_AllocationsMoved == maxAllocationsToMove ||
14671  m_BytesMoved + srcAllocSize > maxBytesToMove)
14672  {
14673  end = true;
14674  break;
14675  }
14676  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14677 
14678  VmaDefragmentationMove move = {};
14679  // Try to place it in one of free spaces from the database.
14680  size_t freeSpaceInfoIndex;
14681  VkDeviceSize dstAllocOffset;
14682  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14683  freeSpaceInfoIndex, dstAllocOffset))
14684  {
14685  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14686  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14687  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14688 
14689  // Same block
14690  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14691  {
14692  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14693 
14694  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14695 
14696  VmaSuballocation suballoc = *srcSuballocIt;
14697  suballoc.offset = dstAllocOffset;
14698  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14699  m_BytesMoved += srcAllocSize;
14700  ++m_AllocationsMoved;
14701 
14702  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14703  ++nextSuballocIt;
14704  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14705  srcSuballocIt = nextSuballocIt;
14706 
14707  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14708 
14709  move.srcBlockIndex = srcOrigBlockIndex;
14710  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14711  move.srcOffset = srcAllocOffset;
14712  move.dstOffset = dstAllocOffset;
14713  move.size = srcAllocSize;
14714 
14715  moves.push_back(move);
14716  }
14717  // Different block
14718  else
14719  {
14720  // MOVE OPTION 2: Move the allocation to a different block.
14721 
14722  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14723 
14724  VmaSuballocation suballoc = *srcSuballocIt;
14725  suballoc.offset = dstAllocOffset;
14726  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14727  m_BytesMoved += srcAllocSize;
14728  ++m_AllocationsMoved;
14729 
14730  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14731  ++nextSuballocIt;
14732  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14733  srcSuballocIt = nextSuballocIt;
14734 
14735  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14736 
14737  move.srcBlockIndex = srcOrigBlockIndex;
14738  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14739  move.srcOffset = srcAllocOffset;
14740  move.dstOffset = dstAllocOffset;
14741  move.size = srcAllocSize;
14742 
14743  moves.push_back(move);
14744  }
14745  }
14746  else
14747  {
14748  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14749 
14750  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
14751  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14752  dstAllocOffset + srcAllocSize > dstBlockSize)
14753  {
14754  // But before that, register remaining free space at the end of dst block.
14755  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14756 
14757  ++dstBlockInfoIndex;
14758  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14759  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14760  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14761  dstBlockSize = pDstMetadata->GetSize();
14762  dstOffset = 0;
14763  dstAllocOffset = 0;
14764  }
14765 
14766  // Same block
14767  if(dstBlockInfoIndex == srcBlockInfoIndex)
14768  {
14769  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14770 
14771  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14772 
14773  bool skipOver = overlap;
14774  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14775  {
14776  // If destination and source places overlap, skip the move if it would
14777  // shift the allocation by less than 1/64 of its size.
14778  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14779  }
14780 
14781  if(skipOver)
14782  {
14783  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14784 
14785  dstOffset = srcAllocOffset + srcAllocSize;
14786  ++srcSuballocIt;
14787  }
14788  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14789  else
14790  {
14791  srcSuballocIt->offset = dstAllocOffset;
14792  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14793  dstOffset = dstAllocOffset + srcAllocSize;
14794  m_BytesMoved += srcAllocSize;
14795  ++m_AllocationsMoved;
14796  ++srcSuballocIt;
14797 
14798  move.srcBlockIndex = srcOrigBlockIndex;
14799  move.dstBlockIndex = dstOrigBlockIndex;
14800  move.srcOffset = srcAllocOffset;
14801  move.dstOffset = dstAllocOffset;
14802  move.size = srcAllocSize;
14803 
14804  moves.push_back(move);
14805  }
14806  }
14807  // Different block
14808  else
14809  {
14810  // MOVE OPTION 2: Move the allocation to a different block.
14811 
14812  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14813  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14814 
14815  VmaSuballocation suballoc = *srcSuballocIt;
14816  suballoc.offset = dstAllocOffset;
14817  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14818  dstOffset = dstAllocOffset + srcAllocSize;
14819  m_BytesMoved += srcAllocSize;
14820  ++m_AllocationsMoved;
14821 
14822  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14823  ++nextSuballocIt;
14824  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14825  srcSuballocIt = nextSuballocIt;
14826 
14827  pDstMetadata->m_Suballocations.push_back(suballoc);
14828 
14829  move.srcBlockIndex = srcOrigBlockIndex;
14830  move.dstBlockIndex = dstOrigBlockIndex;
14831  move.srcOffset = srcAllocOffset;
14832  move.dstOffset = dstAllocOffset;
14833  move.size = srcAllocSize;
14834 
14835  moves.push_back(move);
14836  }
14837  }
14838  }
14839  }
14840 
14841  m_BlockInfos.clear();
14842 
14843  PostprocessMetadata();
14844 
14845  return VK_SUCCESS;
14846 }
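
/*
Conceptually the fast algorithm slides allocations toward offset 0, block by
block. Illustrative before/after for a single block, assuming B and C have
alignment 128 (dstOffset is aligned up with VmaAlignUp as in the code above):

    before: [A 0..100] [free] [B 4096..4196] [free] [C 8192..8292] [free]
    after:  [A 0..100] [B 128..228] [C 256..356] [free ..................]
*/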
14847 
14848 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14849 {
14850  const size_t blockCount = m_pBlockVector->GetBlockCount();
14851  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14852  {
14853  VmaBlockMetadata_Generic* const pMetadata =
14854  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14855  pMetadata->m_FreeCount = 0;
14856  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14857  pMetadata->m_FreeSuballocationsBySize.clear();
14858  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14859  it != pMetadata->m_Suballocations.end(); )
14860  {
14861  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14862  {
14863  VmaSuballocationList::iterator nextIt = it;
14864  ++nextIt;
14865  pMetadata->m_Suballocations.erase(it);
14866  it = nextIt;
14867  }
14868  else
14869  {
14870  ++it;
14871  }
14872  }
14873  }
14874 }
14875 
14876 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14877 {
14878  const size_t blockCount = m_pBlockVector->GetBlockCount();
14879  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14880  {
14881  VmaBlockMetadata_Generic* const pMetadata =
14882  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14883  const VkDeviceSize blockSize = pMetadata->GetSize();
14884 
14885  // No allocations in this block - entire area is free.
14886  if(pMetadata->m_Suballocations.empty())
14887  {
14888  pMetadata->m_FreeCount = 1;
14889  //pMetadata->m_SumFreeSize is already set to blockSize.
14890  VmaSuballocation suballoc = {
14891  0, // offset
14892  blockSize, // size
14893  VMA_NULL, // hAllocation
14894  VMA_SUBALLOCATION_TYPE_FREE };
14895  pMetadata->m_Suballocations.push_back(suballoc);
14896  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14897  }
14898  // There are some allocations in this block.
14899  else
14900  {
14901  VkDeviceSize offset = 0;
14902  VmaSuballocationList::iterator it;
14903  for(it = pMetadata->m_Suballocations.begin();
14904  it != pMetadata->m_Suballocations.end();
14905  ++it)
14906  {
14907  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14908  VMA_ASSERT(it->offset >= offset);
14909 
14910  // Need to insert preceding free space.
14911  if(it->offset > offset)
14912  {
14913  ++pMetadata->m_FreeCount;
14914  const VkDeviceSize freeSize = it->offset - offset;
14915  VmaSuballocation suballoc = {
14916  offset, // offset
14917  freeSize, // size
14918  VMA_NULL, // hAllocation
14919  VMA_SUBALLOCATION_TYPE_FREE };
14920  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14921  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14922  {
14923  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14924  }
14925  }
14926 
14927  pMetadata->m_SumFreeSize -= it->size;
14928  offset = it->offset + it->size;
14929  }
14930 
14931  // Need to insert trailing free space.
14932  if(offset < blockSize)
14933  {
14934  ++pMetadata->m_FreeCount;
14935  const VkDeviceSize freeSize = blockSize - offset;
14936  VmaSuballocation suballoc = {
14937  offset, // offset
14938  freeSize, // size
14939  VMA_NULL, // hAllocation
14940  VMA_SUBALLOCATION_TYPE_FREE };
14941  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14942  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14943  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14944  {
14945  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14946  }
14947  }
14948 
14949  VMA_SORT(
14950  pMetadata->m_FreeSuballocationsBySize.begin(),
14951  pMetadata->m_FreeSuballocationsBySize.end(),
14952  VmaSuballocationItemSizeLess());
14953  }
14954 
14955  VMA_HEAVY_ASSERT(pMetadata->Validate());
14956  }
14957 }
14958 
14959 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14960 {
14961  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14962  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14963  // Advance while existing suballocations start before the new one; keeping
14964  // the end-of-list and offset tests together in the loop condition
14965  // guarantees termination.
14966  while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
14967  {
14968  ++it;
14969  }
14970  pMetadata->m_Suballocations.insert(it, suballoc);
14971 }
14972 
14973 ////////////////////////////////////////////////////////////////////////////////
14974 // VmaBlockVectorDefragmentationContext
14975 
14976 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14977  VmaAllocator hAllocator,
14978  VmaPool hCustomPool,
14979  VmaBlockVector* pBlockVector,
14980  uint32_t currFrameIndex) :
14981  res(VK_SUCCESS),
14982  mutexLocked(false),
14983  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14984  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14985  defragmentationMovesProcessed(0),
14986  defragmentationMovesCommitted(0),
14987  hasDefragmentationPlan(0),
14988  m_hAllocator(hAllocator),
14989  m_hCustomPool(hCustomPool),
14990  m_pBlockVector(pBlockVector),
14991  m_CurrFrameIndex(currFrameIndex),
14992  m_pAlgorithm(VMA_NULL),
14993  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14994  m_AllAllocations(false)
14995 {
14996 }
14997 
14998 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14999 {
15000  vma_delete(m_hAllocator, m_pAlgorithm);
15001 }
15002 
15003 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
15004 {
15005  AllocInfo info = { hAlloc, pChanged };
15006  m_Allocations.push_back(info);
15007 }
15008 
15009 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
15010 {
15011  const bool allAllocations = m_AllAllocations ||
15012  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
15013 
15014  /********************************
15015  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
15016  ********************************/
15017 
15018  /*
15019  Fast algorithm is supported only when certain criteria are met:
15020  - VMA_DEBUG_MARGIN is 0.
15021  - All allocations in this block vector are moveable.
15022  - There is no possibility of image/buffer granularity conflict.
15023  - The defragmentation is not incremental
15024  */
15025  if(VMA_DEBUG_MARGIN == 0 &&
15026  allAllocations &&
15027  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
15028  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
15029  {
15030  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
15031  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
15032  }
15033  else
15034  {
15035  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
15036  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
15037  }
15038 
15039  if(allAllocations)
15040  {
15041  m_pAlgorithm->AddAll();
15042  }
15043  else
15044  {
15045  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
15046  {
15047  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
15048  }
15049  }
15050 }
15051 
15052 ////////////////////////////////////////////////////////////////////////////////
15053 // VmaDefragmentationContext
15054 
15055 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
15056  VmaAllocator hAllocator,
15057  uint32_t currFrameIndex,
15058  uint32_t flags,
15059  VmaDefragmentationStats* pStats) :
15060  m_hAllocator(hAllocator),
15061  m_CurrFrameIndex(currFrameIndex),
15062  m_Flags(flags),
15063  m_pStats(pStats),
15064  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
15065 {
15066  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
15067 }
15068 
15069 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
15070 {
15071  for(size_t i = m_CustomPoolContexts.size(); i--; )
15072  {
15073  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
15074  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
15075  vma_delete(m_hAllocator, pBlockVectorCtx);
15076  }
15077  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
15078  {
15079  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
15080  if(pBlockVectorCtx)
15081  {
15082  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
15083  vma_delete(m_hAllocator, pBlockVectorCtx);
15084  }
15085  }
15086 }
15087 
15088 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
15089 {
15090  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
15091  {
15092  VmaPool pool = pPools[poolIndex];
15093  VMA_ASSERT(pool);
15094  // Pools with algorithm other than default are not defragmented.
15095  if(pool->m_BlockVector.GetAlgorithm() == 0)
15096  {
15097  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
15098 
15099  for(size_t i = m_CustomPoolContexts.size(); i--; )
15100  {
15101  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
15102  {
15103  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
15104  break;
15105  }
15106  }
15107 
15108  if(!pBlockVectorDefragCtx)
15109  {
15110  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15111  m_hAllocator,
15112  pool,
15113  &pool->m_BlockVector,
15114  m_CurrFrameIndex);
15115  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
15116  }
15117 
15118  pBlockVectorDefragCtx->AddAll();
15119  }
15120  }
15121 }
15122 
15123 void VmaDefragmentationContext_T::AddAllocations(
15124  uint32_t allocationCount,
15125  const VmaAllocation* pAllocations,
15126  VkBool32* pAllocationsChanged)
15127 {
15128  // Dispatch pAllocations among per-pool defragmentation contexts. Create them when necessary.
15129  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
15130  {
15131  const VmaAllocation hAlloc = pAllocations[allocIndex];
15132  VMA_ASSERT(hAlloc);
15133  // DedicatedAlloc cannot be defragmented.
15134  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
15135  // Lost allocation cannot be defragmented.
15136  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
15137  {
15138  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
15139 
15140  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
15141  // This allocation belongs to a custom pool.
15142  if(hAllocPool != VK_NULL_HANDLE)
15143  {
15144  // Pools with algorithm other than default are not defragmented.
15145  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
15146  {
15147  for(size_t i = m_CustomPoolContexts.size(); i--; )
15148  {
15149  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
15150  {
15151  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
15152  break;
15153  }
15154  }
15155  if(!pBlockVectorDefragCtx)
15156  {
15157  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15158  m_hAllocator,
15159  hAllocPool,
15160  &hAllocPool->m_BlockVector,
15161  m_CurrFrameIndex);
15162  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
15163  }
15164  }
15165  }
15166  // This allocation belongs to the default pool.
15167  else
15168  {
15169  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
15170  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
15171  if(!pBlockVectorDefragCtx)
15172  {
15173  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
15174  m_hAllocator,
15175  VMA_NULL, // hCustomPool
15176  m_hAllocator->m_pBlockVectors[memTypeIndex],
15177  m_CurrFrameIndex);
15178  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
15179  }
15180  }
15181 
15182  if(pBlockVectorDefragCtx)
15183  {
15184  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
15185  &pAllocationsChanged[allocIndex] : VMA_NULL;
15186  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
15187  }
15188  }
15189  }
15190 }
15191 
15192 VkResult VmaDefragmentationContext_T::Defragment(
15193  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
15194  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
15195  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
15196 {
15197  if(pStats)
15198  {
15199  memset(pStats, 0, sizeof(VmaDefragmentationStats));
15200  }
15201 
15202  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
15203  {
15204  // For incremental defragmentations, we just earmark how much we can move.
15205  // The real work happens in the defragmentation passes.
15206  m_MaxCpuBytesToMove = maxCpuBytesToMove;
15207  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
15208 
15209  m_MaxGpuBytesToMove = maxGpuBytesToMove;
15210  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
15211 
15212  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
15213  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
15214  return VK_SUCCESS;
15215 
15216  return VK_NOT_READY;
15217  }
15218 
15219  if(commandBuffer == VK_NULL_HANDLE)
15220  {
15221  maxGpuBytesToMove = 0;
15222  maxGpuAllocationsToMove = 0;
15223  }
15224 
15225  VkResult res = VK_SUCCESS;
15226 
15227  // Process default pools.
15228  for(uint32_t memTypeIndex = 0;
15229  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
15230  ++memTypeIndex)
15231  {
15232  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15233  if(pBlockVectorCtx)
15234  {
15235  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15236  pBlockVectorCtx->GetBlockVector()->Defragment(
15237  pBlockVectorCtx,
15238  pStats, flags,
15239  maxCpuBytesToMove, maxCpuAllocationsToMove,
15240  maxGpuBytesToMove, maxGpuAllocationsToMove,
15241  commandBuffer);
15242  if(pBlockVectorCtx->res != VK_SUCCESS)
15243  {
15244  res = pBlockVectorCtx->res;
15245  }
15246  }
15247  }
15248 
15249  // Process custom pools.
15250  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15251  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
15252  ++customCtxIndex)
15253  {
15254  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15255  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15256  pBlockVectorCtx->GetBlockVector()->Defragment(
15257  pBlockVectorCtx,
15258  pStats, flags,
15259  maxCpuBytesToMove, maxCpuAllocationsToMove,
15260  maxGpuBytesToMove, maxGpuAllocationsToMove,
15261  commandBuffer);
15262  if(pBlockVectorCtx->res != VK_SUCCESS)
15263  {
15264  res = pBlockVectorCtx->res;
15265  }
15266  }
15267 
15268  return res;
15269 }
15270 
15271 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
15272 {
15273  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
15274  uint32_t movesLeft = pInfo->moveCount;
15275 
15276  // Process default pools.
15277  for(uint32_t memTypeIndex = 0;
15278  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15279  ++memTypeIndex)
15280  {
15281  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15282  if(pBlockVectorCtx)
15283  {
15284  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15285 
15286  if(!pBlockVectorCtx->hasDefragmentationPlan)
15287  {
15288  pBlockVectorCtx->GetBlockVector()->Defragment(
15289  pBlockVectorCtx,
15290  m_pStats, m_Flags,
15291  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15292  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15293  VK_NULL_HANDLE);
15294 
15295  if(pBlockVectorCtx->res < VK_SUCCESS)
15296  continue;
15297 
15298  pBlockVectorCtx->hasDefragmentationPlan = true;
15299  }
15300 
15301  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15302  pBlockVectorCtx,
15303  pCurrentMove, movesLeft);
15304 
15305  movesLeft -= processed;
15306  pCurrentMove += processed;
15307  }
15308  }
15309 
15310  // Process custom pools.
15311  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15312  customCtxIndex < customCtxCount;
15313  ++customCtxIndex)
15314  {
15315  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15316  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15317 
15318  if(!pBlockVectorCtx->hasDefragmentationPlan)
15319  {
15320  pBlockVectorCtx->GetBlockVector()->Defragment(
15321  pBlockVectorCtx,
15322  m_pStats, m_Flags,
15323  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
15324  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
15325  VK_NULL_HANDLE);
15326 
15327  if(pBlockVectorCtx->res < VK_SUCCESS)
15328  continue;
15329 
15330  pBlockVectorCtx->hasDefragmentationPlan = true;
15331  }
15332 
15333  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
15334  pBlockVectorCtx,
15335  pCurrentMove, movesLeft);
15336 
15337  movesLeft -= processed;
15338  pCurrentMove += processed;
15339  }
15340 
15341  pInfo->moveCount = pInfo->moveCount - movesLeft;
15342 
15343  return VK_SUCCESS;
15344 }
15345 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
15346 {
15347  VkResult res = VK_SUCCESS;
15348 
15349  // Process default pools.
15350  for(uint32_t memTypeIndex = 0;
15351  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
15352  ++memTypeIndex)
15353  {
15354  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
15355  if(pBlockVectorCtx)
15356  {
15357  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
15358 
15359  if(!pBlockVectorCtx->hasDefragmentationPlan)
15360  {
15361  res = VK_NOT_READY;
15362  continue;
15363  }
15364 
15365  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15366  pBlockVectorCtx, m_pStats);
15367 
15368  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15369  res = VK_NOT_READY;
15370  }
15371  }
15372 
15373  // Process custom pools.
15374  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15375  customCtxIndex < customCtxCount;
15376  ++customCtxIndex)
15377  {
15378  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15379  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15380 
15381  if(!pBlockVectorCtx->hasDefragmentationPlan)
15382  {
15383  res = VK_NOT_READY;
15384  continue;
15385  }
15386 
15387  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15388  pBlockVectorCtx, m_pStats);
15389 
15390  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15391  res = VK_NOT_READY;
15392  }
15393 
15394  return res;
15395 }
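/*
A minimal usage sketch of the incremental defragmentation pass implemented above,
assuming an `allocator` and a `defragCtx` previously obtained from
vmaDefragmentationBegin(); the pass capacity of 64 is an arbitrary choice:

\code
VmaDefragmentationPassMoveInfo moves[64];
VmaDefragmentationPassInfo passInfo = {};
VkResult res;
do
{
    passInfo.moveCount = 64; // Capacity of the moves array.
    passInfo.pMoves = moves;
    vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
    // passInfo.moveCount now holds the number of moves actually returned.
    // Copy each allocation's data to moves[i].memory at moves[i].offset,
    // e.g. with vkCmdCopyBuffer, then commit the pass:
    res = vmaEndDefragmentationPass(allocator, defragCtx);
} while(res == VK_NOT_READY); // VK_NOT_READY means more moves remain.
\endcode
*/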
15396 
15397 ////////////////////////////////////////////////////////////////////////////////
15398 // VmaRecorder

15399 
15400 #if VMA_RECORDING_ENABLED
15401 
15402 VmaRecorder::VmaRecorder() :
15403  m_UseMutex(true),
15404  m_Flags(0),
15405  m_File(VMA_NULL),
15406  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
15407 {
15408 }
15409 
15410 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
15411 {
15412  m_UseMutex = useMutex;
15413  m_Flags = settings.flags;
15414 
15415 #if defined(_WIN32)
15416  // Open file for writing.
15417  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
15418 
15419  if(err != 0)
15420  {
15421  return VK_ERROR_INITIALIZATION_FAILED;
15422  }
15423 #else
15424  // Open file for writing.
15425  m_File = fopen(settings.pFilePath, "wb");
15426 
15427  if(m_File == 0)
15428  {
15429  return VK_ERROR_INITIALIZATION_FAILED;
15430  }
15431 #endif
15432 
15433  // Write header.
15434  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
15435  fprintf(m_File, "%s\n", "1,8");
15436 
15437  return VK_SUCCESS;
15438 }
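/*
A short sketch of enabling this recorder through the public API, assuming the
rest of VmaAllocatorCreateInfo is filled in elsewhere; the file path is a
placeholder:

\code
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Optional: fflush() after every call.
recordSettings.pFilePath = "vma_replay.csv";            // Placeholder path.

VmaAllocatorCreateInfo allocatorInfo = {};
// ... instance, physicalDevice, device, etc. ...
allocatorInfo.pRecordSettings = &recordSettings;
\endcode

If VMA_RECORDING_ENABLED is not defined to 1, vmaCreateAllocator() returns
VK_ERROR_FEATURE_NOT_PRESENT instead (see VmaAllocator_T::Init below).
*/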
15439 
15440 VmaRecorder::~VmaRecorder()
15441 {
15442  if(m_File != VMA_NULL)
15443  {
15444  fclose(m_File);
15445  }
15446 }
15447 
15448 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
15449 {
15450  CallParams callParams;
15451  GetBasicParams(callParams);
15452 
15453  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15454  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
15455  Flush();
15456 }
15457 
15458 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
15459 {
15460  CallParams callParams;
15461  GetBasicParams(callParams);
15462 
15463  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15464  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
15465  Flush();
15466 }
15467 
15468 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
15469 {
15470  CallParams callParams;
15471  GetBasicParams(callParams);
15472 
15473  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15474  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
15475  createInfo.memoryTypeIndex,
15476  createInfo.flags,
15477  createInfo.blockSize,
15478  (uint64_t)createInfo.minBlockCount,
15479  (uint64_t)createInfo.maxBlockCount,
15480  createInfo.frameInUseCount,
15481  pool);
15482  Flush();
15483 }
15484 
15485 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
15486 {
15487  CallParams callParams;
15488  GetBasicParams(callParams);
15489 
15490  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15491  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
15492  pool);
15493  Flush();
15494 }
15495 
15496 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
15497  const VkMemoryRequirements& vkMemReq,
15498  const VmaAllocationCreateInfo& createInfo,
15499  VmaAllocation allocation)
15500 {
15501  CallParams callParams;
15502  GetBasicParams(callParams);
15503 
15504  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15505  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15506  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15507  vkMemReq.size,
15508  vkMemReq.alignment,
15509  vkMemReq.memoryTypeBits,
15510  createInfo.flags,
15511  createInfo.usage,
15512  createInfo.requiredFlags,
15513  createInfo.preferredFlags,
15514  createInfo.memoryTypeBits,
15515  createInfo.pool,
15516  allocation,
15517  userDataStr.GetString());
15518  Flush();
15519 }
15520 
15521 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
15522  const VkMemoryRequirements& vkMemReq,
15523  const VmaAllocationCreateInfo& createInfo,
15524  uint64_t allocationCount,
15525  const VmaAllocation* pAllocations)
15526 {
15527  CallParams callParams;
15528  GetBasicParams(callParams);
15529 
15530  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15531  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15532  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
15533  vkMemReq.size,
15534  vkMemReq.alignment,
15535  vkMemReq.memoryTypeBits,
15536  createInfo.flags,
15537  createInfo.usage,
15538  createInfo.requiredFlags,
15539  createInfo.preferredFlags,
15540  createInfo.memoryTypeBits,
15541  createInfo.pool);
15542  PrintPointerList(allocationCount, pAllocations);
15543  fprintf(m_File, ",%s\n", userDataStr.GetString());
15544  Flush();
15545 }
15546 
15547 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
15548  const VkMemoryRequirements& vkMemReq,
15549  bool requiresDedicatedAllocation,
15550  bool prefersDedicatedAllocation,
15551  const VmaAllocationCreateInfo& createInfo,
15552  VmaAllocation allocation)
15553 {
15554  CallParams callParams;
15555  GetBasicParams(callParams);
15556 
15557  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15558  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15559  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15560  vkMemReq.size,
15561  vkMemReq.alignment,
15562  vkMemReq.memoryTypeBits,
15563  requiresDedicatedAllocation ? 1 : 0,
15564  prefersDedicatedAllocation ? 1 : 0,
15565  createInfo.flags,
15566  createInfo.usage,
15567  createInfo.requiredFlags,
15568  createInfo.preferredFlags,
15569  createInfo.memoryTypeBits,
15570  createInfo.pool,
15571  allocation,
15572  userDataStr.GetString());
15573  Flush();
15574 }
15575 
15576 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15577  const VkMemoryRequirements& vkMemReq,
15578  bool requiresDedicatedAllocation,
15579  bool prefersDedicatedAllocation,
15580  const VmaAllocationCreateInfo& createInfo,
15581  VmaAllocation allocation)
15582 {
15583  CallParams callParams;
15584  GetBasicParams(callParams);
15585 
15586  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15587  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15588  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15589  vkMemReq.size,
15590  vkMemReq.alignment,
15591  vkMemReq.memoryTypeBits,
15592  requiresDedicatedAllocation ? 1 : 0,
15593  prefersDedicatedAllocation ? 1 : 0,
15594  createInfo.flags,
15595  createInfo.usage,
15596  createInfo.requiredFlags,
15597  createInfo.preferredFlags,
15598  createInfo.memoryTypeBits,
15599  createInfo.pool,
15600  allocation,
15601  userDataStr.GetString());
15602  Flush();
15603 }
15604 
15605 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15606  VmaAllocation allocation)
15607 {
15608  CallParams callParams;
15609  GetBasicParams(callParams);
15610 
15611  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15612  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15613  allocation);
15614  Flush();
15615 }
15616 
15617 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15618  uint64_t allocationCount,
15619  const VmaAllocation* pAllocations)
15620 {
15621  CallParams callParams;
15622  GetBasicParams(callParams);
15623 
15624  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15625  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15626  PrintPointerList(allocationCount, pAllocations);
15627  fprintf(m_File, "\n");
15628  Flush();
15629 }
15630 
15631 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15632  VmaAllocation allocation,
15633  const void* pUserData)
15634 {
15635  CallParams callParams;
15636  GetBasicParams(callParams);
15637 
15638  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15639  UserDataString userDataStr(
15640  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15641  pUserData);
15642  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15643  allocation,
15644  userDataStr.GetString());
15645  Flush();
15646 }
15647 
15648 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15649  VmaAllocation allocation)
15650 {
15651  CallParams callParams;
15652  GetBasicParams(callParams);
15653 
15654  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15655  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15656  allocation);
15657  Flush();
15658 }
15659 
15660 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15661  VmaAllocation allocation)
15662 {
15663  CallParams callParams;
15664  GetBasicParams(callParams);
15665 
15666  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15667  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15668  allocation);
15669  Flush();
15670 }
15671 
15672 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15673  VmaAllocation allocation)
15674 {
15675  CallParams callParams;
15676  GetBasicParams(callParams);
15677 
15678  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15679  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15680  allocation);
15681  Flush();
15682 }
15683 
15684 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15685  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15686 {
15687  CallParams callParams;
15688  GetBasicParams(callParams);
15689 
15690  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15691  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15692  allocation,
15693  offset,
15694  size);
15695  Flush();
15696 }
15697 
15698 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15699  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15700 {
15701  CallParams callParams;
15702  GetBasicParams(callParams);
15703 
15704  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15705  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15706  allocation,
15707  offset,
15708  size);
15709  Flush();
15710 }
15711 
15712 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15713  const VkBufferCreateInfo& bufCreateInfo,
15714  const VmaAllocationCreateInfo& allocCreateInfo,
15715  VmaAllocation allocation)
15716 {
15717  CallParams callParams;
15718  GetBasicParams(callParams);
15719 
15720  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15721  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15722  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15723  bufCreateInfo.flags,
15724  bufCreateInfo.size,
15725  bufCreateInfo.usage,
15726  bufCreateInfo.sharingMode,
15727  allocCreateInfo.flags,
15728  allocCreateInfo.usage,
15729  allocCreateInfo.requiredFlags,
15730  allocCreateInfo.preferredFlags,
15731  allocCreateInfo.memoryTypeBits,
15732  allocCreateInfo.pool,
15733  allocation,
15734  userDataStr.GetString());
15735  Flush();
15736 }
15737 
15738 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15739  const VkImageCreateInfo& imageCreateInfo,
15740  const VmaAllocationCreateInfo& allocCreateInfo,
15741  VmaAllocation allocation)
15742 {
15743  CallParams callParams;
15744  GetBasicParams(callParams);
15745 
15746  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15747  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15748  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15749  imageCreateInfo.flags,
15750  imageCreateInfo.imageType,
15751  imageCreateInfo.format,
15752  imageCreateInfo.extent.width,
15753  imageCreateInfo.extent.height,
15754  imageCreateInfo.extent.depth,
15755  imageCreateInfo.mipLevels,
15756  imageCreateInfo.arrayLayers,
15757  imageCreateInfo.samples,
15758  imageCreateInfo.tiling,
15759  imageCreateInfo.usage,
15760  imageCreateInfo.sharingMode,
15761  imageCreateInfo.initialLayout,
15762  allocCreateInfo.flags,
15763  allocCreateInfo.usage,
15764  allocCreateInfo.requiredFlags,
15765  allocCreateInfo.preferredFlags,
15766  allocCreateInfo.memoryTypeBits,
15767  allocCreateInfo.pool,
15768  allocation,
15769  userDataStr.GetString());
15770  Flush();
15771 }
15772 
15773 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15774  VmaAllocation allocation)
15775 {
15776  CallParams callParams;
15777  GetBasicParams(callParams);
15778 
15779  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15780  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15781  allocation);
15782  Flush();
15783 }
15784 
15785 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15786  VmaAllocation allocation)
15787 {
15788  CallParams callParams;
15789  GetBasicParams(callParams);
15790 
15791  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15792  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15793  allocation);
15794  Flush();
15795 }
15796 
15797 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15798  VmaAllocation allocation)
15799 {
15800  CallParams callParams;
15801  GetBasicParams(callParams);
15802 
15803  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15804  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15805  allocation);
15806  Flush();
15807 }
15808 
15809 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15810  VmaAllocation allocation)
15811 {
15812  CallParams callParams;
15813  GetBasicParams(callParams);
15814 
15815  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15816  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15817  allocation);
15818  Flush();
15819 }
15820 
15821 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15822  VmaPool pool)
15823 {
15824  CallParams callParams;
15825  GetBasicParams(callParams);
15826 
15827  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15828  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15829  pool);
15830  Flush();
15831 }
15832 
15833 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15834  const VmaDefragmentationInfo2& info,
15835  VmaDefragmentationContext ctx)
15836 {
15837  CallParams callParams;
15838  GetBasicParams(callParams);
15839 
15840  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15841  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15842  info.flags);
15843  PrintPointerList(info.allocationCount, info.pAllocations);
15844  fprintf(m_File, ",");
15845  PrintPointerList(info.poolCount, info.pPools);
15846  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15847  info.maxCpuBytesToMove,
15848  info.maxCpuAllocationsToMove,
15849  info.maxGpuBytesToMove,
15850  info.maxGpuAllocationsToMove,
15851  info.commandBuffer,
15852  ctx);
15853  Flush();
15854 }
15855 
15856 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15857  VmaDefragmentationContext ctx)
15858 {
15859  CallParams callParams;
15860  GetBasicParams(callParams);
15861 
15862  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15863  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15864  ctx);
15865  Flush();
15866 }
15867 
15868 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15869  VmaPool pool,
15870  const char* name)
15871 {
15872  CallParams callParams;
15873  GetBasicParams(callParams);
15874 
15875  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15876  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15877  pool, name != VMA_NULL ? name : "");
15878  Flush();
15879 }
15880 
15881 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15882 {
15883  if(pUserData != VMA_NULL)
15884  {
15885  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15886  {
15887  m_Str = (const char*)pUserData;
15888  }
15889  else
15890  {
15891  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, store the pointer value itself, formatted as a string.
15892  snprintf(m_PtrStr, 17, "%p", pUserData);
15893  m_Str = m_PtrStr;
15894  }
15895  }
15896  else
15897  {
15898  m_Str = "";
15899  }
15900 }
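/*
A sketch of the two cases handled above from the public API, assuming
`allocator`, a valid `allocation`, and `myObject` exist elsewhere:

\code
// String user data: requires the allocation to have been created with
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT. VMA copies the string
// and the recorder logs its contents.
vmaSetAllocationUserData(allocator, allocation, (void*)"Player texture");

// Opaque pointer user data: only the address is logged, as in the else branch.
vmaSetAllocationUserData(allocator, allocation, &myObject);
\endcode
*/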
15901 
15902 void VmaRecorder::WriteConfiguration(
15903  const VkPhysicalDeviceProperties& devProps,
15904  const VkPhysicalDeviceMemoryProperties& memProps,
15905  uint32_t vulkanApiVersion,
15906  bool dedicatedAllocationExtensionEnabled,
15907  bool bindMemory2ExtensionEnabled,
15908  bool memoryBudgetExtensionEnabled,
15909  bool deviceCoherentMemoryExtensionEnabled)
15910 {
15911  fprintf(m_File, "Config,Begin\n");
15912 
15913  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15914 
15915  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15916  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15917  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15918  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15919  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15920  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15921 
15922  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15923  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15924  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15925 
15926  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15927  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15928  {
15929  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15930  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15931  }
15932  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15933  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15934  {
15935  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15936  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15937  }
15938 
15939  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15940  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15941  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15942  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15943 
15944  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15945  fprintf(m_File, "Macro,VMA_MIN_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_MIN_ALIGNMENT);
15946  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15947  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15948  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15949  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15950  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15951  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15952  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15953 
15954  fprintf(m_File, "Config,End\n");
15955 }
15956 
15957 void VmaRecorder::GetBasicParams(CallParams& outParams)
15958 {
15959  #if defined(_WIN32)
15960  outParams.threadId = GetCurrentThreadId();
15961  #else
15962  // Use C++11 features to get thread id and convert it to uint32_t.
15963  // There is room for optimization since sstream is quite slow.
15964  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
15965  std::thread::id thread_id = std::this_thread::get_id();
15966  std::stringstream thread_id_to_string_converter;
15967  thread_id_to_string_converter << thread_id;
15968  std::string thread_id_as_string = thread_id_to_string_converter.str();
15969  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
15970  #endif
15971 
15972  auto current_time = std::chrono::high_resolution_clock::now();
15973 
15974  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
15975 }
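/*
A possible alternative to the stringstream conversion above (an assumption, not
part of VMA): hash the id directly with std::hash. This avoids the stream, but
yields a hash value rather than the numeric id printed by operator<<:

\code
std::thread::id id = std::this_thread::get_id();
uint32_t threadId = static_cast<uint32_t>(std::hash<std::thread::id>{}(id));
\endcode
*/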
15976 
15977 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15978 {
15979  if(count)
15980  {
15981  fprintf(m_File, "%p", pItems[0]);
15982  for(uint64_t i = 1; i < count; ++i)
15983  {
15984  fprintf(m_File, " %p", pItems[i]);
15985  }
15986  }
15987 }
15988 
15989 void VmaRecorder::Flush()
15990 {
15991  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15992  {
15993  fflush(m_File);
15994  }
15995 }
15996 
15997 #endif // #if VMA_RECORDING_ENABLED
15998 
15999 ////////////////////////////////////////////////////////////////////////////////
16000 // VmaAllocationObjectAllocator
16001 
16002 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
16003  m_Allocator(pAllocationCallbacks, 1024)
16004 {
16005 }
16006 
16007 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
16008 {
16009  VmaMutexLock mutexLock(m_Mutex);
16010  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
16011 }
16012 
16013 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
16014 {
16015  VmaMutexLock mutexLock(m_Mutex);
16016  m_Allocator.Free(hAlloc);
16017 }
16018 
16019 ////////////////////////////////////////////////////////////////////////////////
16020 // VmaAllocator_T
16021 
16022 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
16023  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
16024  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
16025  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
16026  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
16027  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
16028  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
16029  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
16030  m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
16031  m_hDevice(pCreateInfo->device),
16032  m_hInstance(pCreateInfo->instance),
16033  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
16034  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
16035  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
16036  m_AllocationObjectAllocator(&m_AllocationCallbacks),
16037  m_HeapSizeLimitMask(0),
16038  m_DeviceMemoryCount(0),
16039  m_PreferredLargeHeapBlockSize(0),
16040  m_PhysicalDevice(pCreateInfo->physicalDevice),
16041  m_CurrentFrameIndex(0),
16042  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
16043  m_NextPoolId(0),
16044  m_GlobalMemoryTypeBits(UINT32_MAX)
16045 #if VMA_RECORDING_ENABLED
16046  ,m_pRecorder(VMA_NULL)
16047 #endif
16048 {
16049  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16050  {
16051  m_UseKhrDedicatedAllocation = false;
16052  m_UseKhrBindMemory2 = false;
16053  }
16054 
16055  if(VMA_DEBUG_DETECT_CORRUPTION)
16056  {
16057  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
16058  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
16059  }
16060 
16061  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
16062 
16063  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
16064  {
16065 #if !(VMA_DEDICATED_ALLOCATION)
16066  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
16067  {
16068  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
16069  }
16070 #endif
16071 #if !(VMA_BIND_MEMORY2)
16072  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
16073  {
16074  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
16075  }
16076 #endif
16077  }
16078 #if !(VMA_MEMORY_BUDGET)
16079  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
16080  {
16081  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
16082  }
16083 #endif
16084 #if !(VMA_BUFFER_DEVICE_ADDRESS)
16085  if(m_UseKhrBufferDeviceAddress)
16086  {
16087  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
16088  }
16089 #endif
16090 #if VMA_VULKAN_VERSION < 1002000
16091  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
16092  {
16093  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
16094  }
16095 #endif
16096 #if VMA_VULKAN_VERSION < 1001000
16097  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16098  {
16099  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
16100  }
16101 #endif
16102 #if !(VMA_MEMORY_PRIORITY)
16103  if(m_UseExtMemoryPriority)
16104  {
16105  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
16106  }
16107 #endif
16108 
16109  memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
16110  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
16111  memset(&m_MemProps, 0, sizeof(m_MemProps));
16112 
16113  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
16114  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
16115 
16116 
16117  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
16118  {
16119  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
16120  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
16121  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
16122  }
16123 
16124  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
16125 
16126  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
16127  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
16128 
16129  VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
16130  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
16131  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
16132  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
16133 
16134  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
16135  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
16136 
16137  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
16138 
16139 
16140  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
16141  {
16142  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
16143  {
16144  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
16145  if(limit != VK_WHOLE_SIZE)
16146  {
16147  m_HeapSizeLimitMask |= 1u << heapIndex;
16148  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
16149  {
16150  m_MemProps.memoryHeaps[heapIndex].size = limit;
16151  }
16152  }
16153  }
16154  }
16155 
16156  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16157  {
16158  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
16159 
16160  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
16161  this,
16162  VK_NULL_HANDLE, // hParentPool
16163  memTypeIndex,
16164  preferredBlockSize,
16165  0,
16166  SIZE_MAX,
16167  GetBufferImageGranularity(),
16168  pCreateInfo->frameInUseCount,
16169  false, // explicitBlockSize
16170  false, // linearAlgorithm
16171  0.5f, // priority (0.5 is the default per Vulkan spec)
16172  GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
16173  VMA_NULL); // pMemoryAllocateNext
16174  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
16175  // because minBlockCount is 0.
16176  }
16177 }
16178 
16179 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
16180 {
16181  VkResult res = VK_SUCCESS;
16182 
16183  if(pCreateInfo->pRecordSettings != VMA_NULL &&
16184  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
16185  {
16186 #if VMA_RECORDING_ENABLED
16187  m_pRecorder = vma_new(this, VmaRecorder)();
16188  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
16189  if(res != VK_SUCCESS)
16190  {
16191  return res;
16192  }
16193  m_pRecorder->WriteConfiguration(
16194  m_PhysicalDeviceProperties,
16195  m_MemProps,
16196  m_VulkanApiVersion,
16197  m_UseKhrDedicatedAllocation,
16198  m_UseKhrBindMemory2,
16199  m_UseExtMemoryBudget,
16200  m_UseAmdDeviceCoherentMemory);
16201  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
16202 #else
16203  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
16204  return VK_ERROR_FEATURE_NOT_PRESENT;
16205 #endif
16206  }
16207 
16208 #if VMA_MEMORY_BUDGET
16209  if(m_UseExtMemoryBudget)
16210  {
16211  UpdateVulkanBudget();
16212  }
16213 #endif // #if VMA_MEMORY_BUDGET
16214 
16215  return res;
16216 }
16217 
16218 VmaAllocator_T::~VmaAllocator_T()
16219 {
16220 #if VMA_RECORDING_ENABLED
16221  if(m_pRecorder != VMA_NULL)
16222  {
16223  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
16224  vma_delete(this, m_pRecorder);
16225  }
16226 #endif
16227 
16228  VMA_ASSERT(m_Pools.IsEmpty());
16229 
16230  for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
16231  {
16232  if(!m_DedicatedAllocations[memTypeIndex].IsEmpty())
16233  {
16234  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
16235  }
16236 
16237  vma_delete(this, m_pBlockVectors[memTypeIndex]);
16238  }
16239 }
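/*
A teardown sketch matching the asserts above: all allocations and custom pools
must be destroyed before the allocator itself (`buf`, `alloc`, and `pool` are
assumed to have been created elsewhere):

\code
vmaDestroyBuffer(allocator, buf, alloc); // Frees the block or dedicated allocation.
vmaDestroyPool(allocator, pool);         // m_Pools must be empty...
vmaDestroyAllocator(allocator);          // ...before this call.
\endcode
*/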
16240 
16241 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
16242 {
16243 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16244  ImportVulkanFunctions_Static();
16245 #endif
16246 
16247  if(pVulkanFunctions != VMA_NULL)
16248  {
16249  ImportVulkanFunctions_Custom(pVulkanFunctions);
16250  }
16251 
16252 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16253  ImportVulkanFunctions_Dynamic();
16254 #endif
16255 
16256  ValidateVulkanFunctions();
16257 }
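/*
A sketch of the custom-import path, assuming statically linked Vulkan
prototypes are available to take addresses from. Any member left VMA_NULL is
then filled by the static or dynamic import paths implemented below:

\code
VmaVulkanFunctions vulkanFunctions = {};
vulkanFunctions.vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties;
vulkanFunctions.vkAllocateMemory = vkAllocateMemory;
vulkanFunctions.vkFreeMemory = vkFreeMemory;
// ... fill the remaining members the same way ...

VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.pVulkanFunctions = &vulkanFunctions;
\endcode
*/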
16258 
16259 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16260 
16261 void VmaAllocator_T::ImportVulkanFunctions_Static()
16262 {
16263  // Vulkan 1.0
16264  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
16265  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
16266  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
16267  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
16268  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
16269  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
16270  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
16271  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
16272  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
16273  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
16274  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
16275  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
16276  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
16277  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
16278  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
16279  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
16280  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
16281 
16282  // Vulkan 1.1
16283 #if VMA_VULKAN_VERSION >= 1001000
16284  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16285  {
16286  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
16287  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
16288  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
16289  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
16290  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
16291  }
16292 #endif
16293 }
16294 
16295 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
16296 
16297 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
16298 {
16299  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
16300 
16301 #define VMA_COPY_IF_NOT_NULL(funcName) \
16302  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
16303 
16304  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
16305  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
16306  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
16307  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
16308  VMA_COPY_IF_NOT_NULL(vkMapMemory);
16309  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
16310  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
16311  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
16312  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
16313  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
16314  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
16315  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
16316  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
16317  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
16318  VMA_COPY_IF_NOT_NULL(vkCreateImage);
16319  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
16320  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
16321 
16322 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16323  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
16324  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
16325 #endif
16326 
16327 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16328  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
16329  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
16330 #endif
16331 
16332 #if VMA_MEMORY_BUDGET
16333  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
16334 #endif
16335 
16336 #undef VMA_COPY_IF_NOT_NULL
16337 }
16338 
16339 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16340 
16341 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
16342 {
16343 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
16344  if(m_VulkanFunctions.memberName == VMA_NULL) \
16345  m_VulkanFunctions.memberName = \
16346  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
16347 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
16348  if(m_VulkanFunctions.memberName == VMA_NULL) \
16349  m_VulkanFunctions.memberName = \
16350  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
16351 
16352  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
16353  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
16354  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
16355  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
16356  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
16357  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
16358  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
16359  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
16360  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
16361  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
16362  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
16363  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
16364  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
16365  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
16366  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
16367  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
16368  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
16369 
16370 #if VMA_VULKAN_VERSION >= 1001000
16371  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16372  {
16373  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
16374  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
16375  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
16376  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
16377  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
16378  }
16379 #endif
16380 
16381 #if VMA_DEDICATED_ALLOCATION
16382  if(m_UseKhrDedicatedAllocation)
16383  {
16384  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
16385  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
16386  }
16387 #endif
16388 
16389 #if VMA_BIND_MEMORY2
16390  if(m_UseKhrBindMemory2)
16391  {
16392  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
16393  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
16394  }
16395 #endif // #if VMA_BIND_MEMORY2
16396 
16397 #if VMA_MEMORY_BUDGET
16398  if(m_UseExtMemoryBudget)
16399  {
16400  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
16401  }
16402 #endif // #if VMA_MEMORY_BUDGET
16403 
16404 #undef VMA_FETCH_DEVICE_FUNC
16405 #undef VMA_FETCH_INSTANCE_FUNC
16406 }
16407 
16408 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16409 
16410 void VmaAllocator_T::ValidateVulkanFunctions()
16411 {
16412  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
16413  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
16414  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
16415  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
16416  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
16417  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
16418  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
16419  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
16420  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
16421  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
16422  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
16423  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
16424  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
16425  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
16426  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
16427  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
16428  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
16429 
16430 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16431  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
16432  {
16433  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
16434  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
16435  }
16436 #endif
16437 
16438 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16439  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
16440  {
16441  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
16442  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
16443  }
16444 #endif
16445 
16446 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
16447  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16448  {
16449  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
16450  }
16451 #endif
16452 }
16453 
16454 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
16455 {
16456  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16457  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16458  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
16459  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
16460 }
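/*
Worked example, using the default VMA_SMALL_HEAP_MAX_SIZE (1 GiB) and
VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB) defined earlier in this file:
- a 16 GiB device-local heap is not small, so the preferred block size is 256 MiB;
- a 224 MiB heap counts as small, so the preferred block size is
  224 MiB / 8 = 28 MiB, aligned up to a multiple of 32 bytes (already satisfied here).
*/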
16461 
16462 VkResult VmaAllocator_T::AllocateMemoryOfType(
16463  VkDeviceSize size,
16464  VkDeviceSize alignment,
16465  bool dedicatedAllocation,
16466  VkBuffer dedicatedBuffer,
16467  VkBufferUsageFlags dedicatedBufferUsage,
16468  VkImage dedicatedImage,
16469  const VmaAllocationCreateInfo& createInfo,
16470  uint32_t memTypeIndex,
16471  VmaSuballocationType suballocType,
16472  size_t allocationCount,
16473  VmaAllocation* pAllocations)
16474 {
16475  VMA_ASSERT(pAllocations != VMA_NULL);
16476  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
16477 
16478  VmaAllocationCreateInfo finalCreateInfo = createInfo;
16479 
16480  // If memory type is not HOST_VISIBLE, disable MAPPED.
16481  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16482  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16483  {
16484  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16485  }
16486  // If memory is lazily allocated, it should be always dedicated.
16487  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
16488  {
16489  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16490  }
16491 
16492  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
16493  VMA_ASSERT(blockVector);
16494 
16495  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
16496  bool preferDedicatedMemory =
16497  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
16498  dedicatedAllocation ||
16499  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
16500  size > preferredBlockSize / 2;
16501 
16502  if(preferDedicatedMemory &&
16503  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
16504  finalCreateInfo.pool == VK_NULL_HANDLE)
16505  {
16506  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16507  }
16508 
16509  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
16510  {
16511  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16512  {
16513  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16514  }
16515  else
16516  {
16517  return AllocateDedicatedMemory(
16518  size,
16519  suballocType,
16520  memTypeIndex,
16521  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16522  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16523  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16524  finalCreateInfo.pUserData,
16525  finalCreateInfo.priority,
16526  dedicatedBuffer,
16527  dedicatedBufferUsage,
16528  dedicatedImage,
16529  allocationCount,
16530  pAllocations);
16531  }
16532  }
16533  else
16534  {
16535  VkResult res = blockVector->Allocate(
16536  m_CurrentFrameIndex.load(),
16537  size,
16538  alignment,
16539  finalCreateInfo,
16540  suballocType,
16541  allocationCount,
16542  pAllocations);
16543  if(res == VK_SUCCESS)
16544  {
16545  return res;
16546  }
16547 
16548  // Try dedicated memory.
16549  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16550  {
16551  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16552  }
16553 
16554  // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
16555  // which can quickly deplete maxMemoryAllocationCount: Don't try dedicated allocations when above
16556  // 3/4 of the maximum allocation count.
16557  if(m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
16558  {
16559  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16560  }
16561 
16562  res = AllocateDedicatedMemory(
16563  size,
16564  suballocType,
16565  memTypeIndex,
16566  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16567  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16568  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16569  finalCreateInfo.pUserData,
16570  finalCreateInfo.priority,
16571  dedicatedBuffer,
16572  dedicatedBufferUsage,
16573  dedicatedImage,
16574  allocationCount,
16575  pAllocations);
16576  if(res == VK_SUCCESS)
16577  {
16578  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
16579  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
16580  return VK_SUCCESS;
16581  }
16582  else
16583  {
16584  // Everything failed: Return error code.
16585  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16586  return res;
16587  }
16588  }
16589 }
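/*
A sketch of reaching the dedicated path above from the public API, bypassing
the half-block-size heuristic (`allocator` is assumed to exist elsewhere):

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 64ull * 1024 * 1024; // Below half of a 256 MiB block...
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; // ...but forced dedicated anyway.

VkBuffer buf;
VmaAllocation alloc;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, VMA_NULL);
\endcode
*/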
16590 
16591 VkResult VmaAllocator_T::AllocateDedicatedMemory(
16592  VkDeviceSize size,
16593  VmaSuballocationType suballocType,
16594  uint32_t memTypeIndex,
16595  bool withinBudget,
16596  bool map,
16597  bool isUserDataString,
16598  void* pUserData,
16599  float priority,
16600  VkBuffer dedicatedBuffer,
16601  VkBufferUsageFlags dedicatedBufferUsage,
16602  VkImage dedicatedImage,
16603  size_t allocationCount,
16604  VmaAllocation* pAllocations)
16605 {
16606  VMA_ASSERT(allocationCount > 0 && pAllocations);
16607 
16608  if(withinBudget)
16609  {
16610  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16611  VmaBudget heapBudget = {};
16612  GetBudget(&heapBudget, heapIndex, 1);
16613  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16614  {
16615  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16616  }
16617  }
16618 
16619  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16620  allocInfo.memoryTypeIndex = memTypeIndex;
16621  allocInfo.allocationSize = size;
16622 
16623 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16624  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16625  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16626  {
16627  if(dedicatedBuffer != VK_NULL_HANDLE)
16628  {
16629  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16630  dedicatedAllocInfo.buffer = dedicatedBuffer;
16631  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16632  }
16633  else if(dedicatedImage != VK_NULL_HANDLE)
16634  {
16635  dedicatedAllocInfo.image = dedicatedImage;
16636  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16637  }
16638  }
16639 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16640 
16641 #if VMA_BUFFER_DEVICE_ADDRESS
16642  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16643  if(m_UseKhrBufferDeviceAddress)
16644  {
16645  bool canContainBufferWithDeviceAddress = true;
16646  if(dedicatedBuffer != VK_NULL_HANDLE)
16647  {
16648  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16649  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16650  }
16651  else if(dedicatedImage != VK_NULL_HANDLE)
16652  {
16653  canContainBufferWithDeviceAddress = false;
16654  }
16655  if(canContainBufferWithDeviceAddress)
16656  {
16657  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16658  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16659  }
16660  }
16661 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16662 
16663 #if VMA_MEMORY_PRIORITY
16664  VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
16665  if(m_UseExtMemoryPriority)
16666  {
16667  priorityInfo.priority = priority;
16668  VmaPnextChainPushFront(&allocInfo, &priorityInfo);
16669  }
16670 #endif // #if VMA_MEMORY_PRIORITY
16671 
16672  size_t allocIndex;
16673  VkResult res = VK_SUCCESS;
16674  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16675  {
16676  res = AllocateDedicatedMemoryPage(
16677  size,
16678  suballocType,
16679  memTypeIndex,
16680  allocInfo,
16681  map,
16682  isUserDataString,
16683  pUserData,
16684  pAllocations + allocIndex);
16685  if(res != VK_SUCCESS)
16686  {
16687  break;
16688  }
16689  }
16690 
16691  if(res == VK_SUCCESS)
16692  {
16693  // Register them in m_DedicatedAllocations.
16694  {
16695  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16696  DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
16697  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16698  {
16699  dedicatedAllocations.PushBack(pAllocations[allocIndex]);
16700  }
16701  }
16702 
16703  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16704  }
16705  else
16706  {
16707  // Free all already created allocations.
16708  while(allocIndex--)
16709  {
16710  VmaAllocation currAlloc = pAllocations[allocIndex];
16711  VkDeviceMemory hMemory = currAlloc->GetMemory();
16712 
16713  /*
16714  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16715  before vkFreeMemory.
16716 
16717  if(currAlloc->GetMappedData() != VMA_NULL)
16718  {
16719  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16720  }
16721  */
16722 
16723  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16724  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16725  currAlloc->SetUserData(this, VMA_NULL);
16726  m_AllocationObjectAllocator.Free(currAlloc);
16727  }
16728 
16729  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16730  }
16731 
16732  return res;
16733 }
16734 
16735 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16736  VkDeviceSize size,
16737  VmaSuballocationType suballocType,
16738  uint32_t memTypeIndex,
16739  const VkMemoryAllocateInfo& allocInfo,
16740  bool map,
16741  bool isUserDataString,
16742  void* pUserData,
16743  VmaAllocation* pAllocation)
16744 {
16745  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16746  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16747  if(res < 0)
16748  {
16749  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16750  return res;
16751  }
16752 
16753  void* pMappedData = VMA_NULL;
16754  if(map)
16755  {
16756  res = (*m_VulkanFunctions.vkMapMemory)(
16757  m_hDevice,
16758  hMemory,
16759  0,
16760  VK_WHOLE_SIZE,
16761  0,
16762  &pMappedData);
16763  if(res < 0)
16764  {
16765  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16766  FreeVulkanMemory(memTypeIndex, size, hMemory);
16767  return res;
16768  }
16769  }
16770 
16771  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16772  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16773  (*pAllocation)->SetUserData(this, pUserData);
16774  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16775  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16776  {
16777  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16778  }
16779 
16780  return VK_SUCCESS;
16781 }
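/*
A sketch of the `map` path above via the public API: VMA_ALLOCATION_CREATE_MAPPED_BIT
keeps the memory mapped for the allocation's whole lifetime (`allocator`,
`bufCreateInfo`, `srcData`, and `dataSize` are assumed to exist elsewhere):

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU; // Guarantees HOST_VISIBLE memory.
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;

VkBuffer buf;
VmaAllocation alloc;
VmaAllocationInfo allocInfo;
vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

// No vmaMapMemory/vmaUnmapMemory pair is needed:
memcpy(allocInfo.pMappedData, srcData, dataSize);
\endcode
*/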
16782 
16783 void VmaAllocator_T::GetBufferMemoryRequirements(
16784  VkBuffer hBuffer,
16785  VkMemoryRequirements& memReq,
16786  bool& requiresDedicatedAllocation,
16787  bool& prefersDedicatedAllocation) const
16788 {
16789 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16790  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16791  {
16792  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16793  memReqInfo.buffer = hBuffer;
16794 
16795  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16796 
16797  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16798  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16799 
16800  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16801 
16802  memReq = memReq2.memoryRequirements;
16803  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16804  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16805  }
16806  else
16807 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16808  {
16809  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16810  requiresDedicatedAllocation = false;
16811  prefersDedicatedAllocation = false;
16812  }
16813 }
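/*
For comparison, the equivalent core-Vulkan 1.1 query that the KHR path above
wraps (`device` and `buf` are assumed to exist elsewhere):

\code
VkBufferMemoryRequirementsInfo2 info = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 };
info.buffer = buf;

VkMemoryDedicatedRequirements dedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS };
VkMemoryRequirements2 memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 };
memReq2.pNext = &dedicatedReq;

vkGetBufferMemoryRequirements2(device, &info, &memReq2);
// dedicatedReq.requiresDedicatedAllocation / prefersDedicatedAllocation now
// carry the same information this function returns through its out-parameters.
\endcode
*/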
16814 
16815 void VmaAllocator_T::GetImageMemoryRequirements(
16816  VkImage hImage,
16817  VkMemoryRequirements& memReq,
16818  bool& requiresDedicatedAllocation,
16819  bool& prefersDedicatedAllocation) const
16820 {
16821 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16822  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16823  {
16824  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16825  memReqInfo.image = hImage;
16826 
16827  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16828 
16829  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16830  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16831 
16832  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16833 
16834  memReq = memReq2.memoryRequirements;
16835  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16836  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16837  }
16838  else
16839 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16840  {
16841  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16842  requiresDedicatedAllocation = false;
16843  prefersDedicatedAllocation = false;
16844  }
16845 }
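// For reference, the pNext-chain query performed above corresponds to this raw Vulkan 1.1
// sketch (assuming a valid `device` and `image`; local names are hypothetical):
//
//     VkMemoryDedicatedRequirements dedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS };
//     VkMemoryRequirements2 memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 };
//     memReq2.pNext = &dedicatedReq;
//     VkImageMemoryRequirementsInfo2 reqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 };
//     reqInfo.image = image;
//     vkGetImageMemoryRequirements2(device, &reqInfo, &memReq2);
//     // dedicatedReq.requiresDedicatedAllocation / prefersDedicatedAllocation are then inspected.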
16846 
16847 VkResult VmaAllocator_T::AllocateMemory(
16848  const VkMemoryRequirements& vkMemReq,
16849  bool requiresDedicatedAllocation,
16850  bool prefersDedicatedAllocation,
16851  VkBuffer dedicatedBuffer,
16852  VkBufferUsageFlags dedicatedBufferUsage,
16853  VkImage dedicatedImage,
16854  const VmaAllocationCreateInfo& createInfo,
16855  VmaSuballocationType suballocType,
16856  size_t allocationCount,
16857  VmaAllocation* pAllocations)
16858 {
16859  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16860 
16861  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16862 
16863  if(vkMemReq.size == 0)
16864  {
16865  return VK_ERROR_VALIDATION_FAILED_EXT;
16866  }
16867  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16868  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16869  {
16870  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16871  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16872  }
16873  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16874  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16875  {
16876  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16877  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16878  }
16879  if(requiresDedicatedAllocation)
16880  {
16881  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16882  {
16883  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16884  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16885  }
16886  if(createInfo.pool != VK_NULL_HANDLE)
16887  {
16888  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16889  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16890  }
16891  }
16892  if((createInfo.pool != VK_NULL_HANDLE) &&
16893  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16894  {
16895  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16896  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16897  }
16898 
16899  if(createInfo.pool != VK_NULL_HANDLE)
16900  {
16901  VmaAllocationCreateInfo createInfoForPool = createInfo;
16902  // If memory type is not HOST_VISIBLE, disable MAPPED.
16903  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16904  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16905  {
16906  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16907  }
16908 
16909  return createInfo.pool->m_BlockVector.Allocate(
16910  m_CurrentFrameIndex.load(),
16911  vkMemReq.size,
16912  vkMemReq.alignment,
16913  createInfoForPool,
16914  suballocType,
16915  allocationCount,
16916  pAllocations);
16917  }
16918  else
16919  {
16920  // Bit mask of Vulkan memory types acceptable for this allocation.
16921  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16922  uint32_t memTypeIndex = UINT32_MAX;
16923  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16924  if(res == VK_SUCCESS)
16925  {
16926  res = AllocateMemoryOfType(
16927  vkMemReq.size,
16928  vkMemReq.alignment,
16929  requiresDedicatedAllocation || prefersDedicatedAllocation,
16930  dedicatedBuffer,
16931  dedicatedBufferUsage,
16932  dedicatedImage,
16933  createInfo,
16934  memTypeIndex,
16935  suballocType,
16936  allocationCount,
16937  pAllocations);
16938  // Succeeded on first try.
16939  if(res == VK_SUCCESS)
16940  {
16941  return res;
16942  }
16943  // Allocation from this memory type failed. Try other compatible memory types.
16944  else
16945  {
16946  for(;;)
16947  {
16948  // Remove old memTypeIndex from list of possibilities.
16949  memoryTypeBits &= ~(1u << memTypeIndex);
16950  // Find alternative memTypeIndex.
16951  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16952  if(res == VK_SUCCESS)
16953  {
16954  res = AllocateMemoryOfType(
16955  vkMemReq.size,
16956  vkMemReq.alignment,
16957  requiresDedicatedAllocation || prefersDedicatedAllocation,
16958  dedicatedBuffer,
16959  dedicatedBufferUsage,
16960  dedicatedImage,
16961  createInfo,
16962  memTypeIndex,
16963  suballocType,
16964  allocationCount,
16965  pAllocations);
16966  // Allocation from this alternative memory type succeeded.
16967  if(res == VK_SUCCESS)
16968  {
16969  return res;
16970  }
16971  // else: Allocation from this memory type failed. Try next one - next loop iteration.
16972  }
16973  // No other matching memory type index could be found.
16974  else
16975  {
16976  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
16977  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16978  }
16979  }
16980  }
16981  }
16982  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
16983  else
16984  return res;
16985  }
16986 }
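// Usage sketch (illustrative): AllocateMemory() above backs vmaAllocateMemory(). If the
// best-ranked memory type runs out, the loop above clears its bit and retries the next
// compatible type, so a single call can fall back transparently:
//
//     VkMemoryRequirements memReq; // e.g. filled by vkGetBufferMemoryRequirements
//     VmaAllocationCreateInfo createInfo = {};
//     createInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     VmaAllocation alloc;
//     VmaAllocationInfo allocInfo;
//     VkResult res = vmaAllocateMemory(allocator, &memReq, &createInfo, &alloc, &allocInfo);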
16987 
16988 void VmaAllocator_T::FreeMemory(
16989  size_t allocationCount,
16990  const VmaAllocation* pAllocations)
16991 {
16992  VMA_ASSERT(pAllocations);
16993 
16994  for(size_t allocIndex = allocationCount; allocIndex--; )
16995  {
16996  VmaAllocation allocation = pAllocations[allocIndex];
16997 
16998  if(allocation != VK_NULL_HANDLE)
16999  {
17000  if(TouchAllocation(allocation))
17001  {
17002  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
17003  {
17004  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
17005  }
17006 
17007  switch(allocation->GetType())
17008  {
17009  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17010  {
17011  VmaBlockVector* pBlockVector = VMA_NULL;
17012  VmaPool hPool = allocation->GetBlock()->GetParentPool();
17013  if(hPool != VK_NULL_HANDLE)
17014  {
17015  pBlockVector = &hPool->m_BlockVector;
17016  }
17017  else
17018  {
17019  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17020  pBlockVector = m_pBlockVectors[memTypeIndex];
17021  }
17022  pBlockVector->Free(allocation);
17023  }
17024  break;
17025  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17026  FreeDedicatedMemory(allocation);
17027  break;
17028  default:
17029  VMA_ASSERT(0);
17030  }
17031  }
17032 
17033  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
17034  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
17035  allocation->SetUserData(this, VMA_NULL);
17036  m_AllocationObjectAllocator.Free(allocation);
17037  }
17038  }
17039 }
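// A matching usage sketch: allocations created in bulk can also be released in bulk via
// vmaFreeMemoryPages(), which lands in FreeMemory() above (count and names illustrative):
//
//     VmaAllocation allocs[8];
//     // ... filled earlier, e.g. by vmaAllocateMemoryPages() ...
//     vmaFreeMemoryPages(allocator, 8, allocs);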
17040 
17041 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
17042 {
17043  // Initialize.
17044  InitStatInfo(pStats->total);
17045  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
17046  InitStatInfo(pStats->memoryType[i]);
17047  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
17048  InitStatInfo(pStats->memoryHeap[i]);
17049 
17050  // Process default pools.
17051  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17052  {
17053  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17054  VMA_ASSERT(pBlockVector);
17055  pBlockVector->AddStats(pStats);
17056  }
17057 
17058  // Process custom pools.
17059  {
17060  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17061  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
17062  {
17063  pool->m_BlockVector.AddStats(pStats);
17064  }
17065  }
17066 
17067  // Process dedicated allocations.
17068  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17069  {
17070  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
17071  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17072  DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
17073  for(VmaAllocation alloc = dedicatedAllocList.Front();
17074  alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
17075  {
17076  VmaStatInfo allocationStatInfo;
17077  alloc->DedicatedAllocCalcStatsInfo(allocationStatInfo);
17078  VmaAddStatInfo(pStats->total, allocationStatInfo);
17079  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
17080  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
17081  }
17082  }
17083 
17084  // Postprocess.
17085  VmaPostprocessCalcStatInfo(pStats->total);
17086  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
17087  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
17088  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
17089  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
17090 }
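// Usage sketch (illustrative): the aggregation above serves vmaCalculateStats():
//
//     VmaStats stats;
//     vmaCalculateStats(allocator, &stats);
//     // stats.total, stats.memoryType[i] and stats.memoryHeap[h] are now filled,
//     // e.g. stats.total.usedBytes is the total size of all live allocations.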
17091 
17092 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
17093 {
17094 #if VMA_MEMORY_BUDGET
17095  if(m_UseExtMemoryBudget)
17096  {
17097  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
17098  {
17099  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
17100  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
17101  {
17102  const uint32_t heapIndex = firstHeap + i;
17103 
17104  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
17105  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
17106 
17107  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
17108  {
17109  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
17110  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17111  }
17112  else
17113  {
17114  outBudget->usage = 0;
17115  }
17116 
17117  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
17118  outBudget->budget = VMA_MIN(
17119  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
17120  }
17121  }
17122  else
17123  {
17124  UpdateVulkanBudget(); // Outside of mutex lock
17125  GetBudget(outBudget, firstHeap, heapCount); // Recursion
17126  }
17127  }
17128  else
17129 #endif
17130  {
17131  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
17132  {
17133  const uint32_t heapIndex = firstHeap + i;
17134 
17135  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
17136  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
17137 
17138  outBudget->usage = outBudget->blockBytes;
17139  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17140  }
17141  }
17142 }
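// Usage sketch (illustrative): checking the budget before creating a large resource.
// Without VK_EXT_memory_budget the values come from the 80% heuristic above.
// `heapIndex` and `resourceSize` are assumed known to the caller.
//
//     VmaBudget budget[VK_MAX_MEMORY_HEAPS] = {};
//     vmaGetBudget(allocator, budget);
//     if(budget[heapIndex].usage + resourceSize > budget[heapIndex].budget)
//     {
//         // Free something first, or allocate with VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT.
//     }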
17143 
17144 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
17145 
17146 VkResult VmaAllocator_T::DefragmentationBegin(
17147  const VmaDefragmentationInfo2& info,
17148  VmaDefragmentationStats* pStats,
17149  VmaDefragmentationContext* pContext)
17150 {
17151  if(info.pAllocationsChanged != VMA_NULL)
17152  {
17153  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
17154  }
17155 
17156  *pContext = vma_new(this, VmaDefragmentationContext_T)(
17157  this, m_CurrentFrameIndex.load(), info.flags, pStats);
17158 
17159  (*pContext)->AddPools(info.poolCount, info.pPools);
17160  (*pContext)->AddAllocations(
17161  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
17162 
17163  VkResult res = (*pContext)->Defragment(
17164  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
17165  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
17166  info.commandBuffer, pStats, info.flags);
17167 
17168  if(res != VK_NOT_READY)
17169  {
17170  vma_delete(this, *pContext);
17171  *pContext = VMA_NULL;
17172  }
17173 
17174  return res;
17175 }
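// Usage sketch (simplified, CPU-side moves only, error handling omitted): a typical
// begin/end pair driving the context created above:
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = allocCount;
//     defragInfo.pAllocations = allocs;
//     defragInfo.pAllocationsChanged = allocsChanged; // optional VkBool32 array
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//
//     VmaDefragmentationContext defragCtx;
//     vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
//     vmaDefragmentationEnd(allocator, defragCtx);
//     // Buffers/images bound to moved allocations must be recreated and rebound afterwards.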
17176 
17177 VkResult VmaAllocator_T::DefragmentationEnd(
17178  VmaDefragmentationContext context)
17179 {
17180  vma_delete(this, context);
17181  return VK_SUCCESS;
17182 }
17183 
17184 VkResult VmaAllocator_T::DefragmentationPassBegin(
17185  VmaDefragmentationPassInfo* pInfo,
17186  VmaDefragmentationContext context)
17187 {
17188  return context->DefragmentPassBegin(pInfo);
17189 }
17190 VkResult VmaAllocator_T::DefragmentationPassEnd(
17191  VmaDefragmentationContext context)
17192 {
17193  return context->DefragmentPassEnd();
17194 
17195 }
17196 
17197 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
17198 {
17199  if(hAllocation->CanBecomeLost())
17200  {
17201  /*
17202  Warning: This is a carefully designed algorithm.
17203  Do not modify unless you really know what you're doing :)
17204  */
17205  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17206  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17207  for(;;)
17208  {
17209  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
17210  {
17211  pAllocationInfo->memoryType = UINT32_MAX;
17212  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
17213  pAllocationInfo->offset = 0;
17214  pAllocationInfo->size = hAllocation->GetSize();
17215  pAllocationInfo->pMappedData = VMA_NULL;
17216  pAllocationInfo->pUserData = hAllocation->GetUserData();
17217  return;
17218  }
17219  else if(localLastUseFrameIndex == localCurrFrameIndex)
17220  {
17221  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
17222  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
17223  pAllocationInfo->offset = hAllocation->GetOffset();
17224  pAllocationInfo->size = hAllocation->GetSize();
17225  pAllocationInfo->pMappedData = VMA_NULL;
17226  pAllocationInfo->pUserData = hAllocation->GetUserData();
17227  return;
17228  }
17229  else // Last use time earlier than current time.
17230  {
17231  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17232  {
17233  localLastUseFrameIndex = localCurrFrameIndex;
17234  }
17235  }
17236  }
17237  }
17238  else
17239  {
17240 #if VMA_STATS_STRING_ENABLED
17241  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17242  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17243  for(;;)
17244  {
17245  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17246  if(localLastUseFrameIndex == localCurrFrameIndex)
17247  {
17248  break;
17249  }
17250  else // Last use time earlier than current time.
17251  {
17252  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17253  {
17254  localLastUseFrameIndex = localCurrFrameIndex;
17255  }
17256  }
17257  }
17258 #endif
17259 
17260  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
17261  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
17262  pAllocationInfo->offset = hAllocation->GetOffset();
17263  pAllocationInfo->size = hAllocation->GetSize();
17264  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
17265  pAllocationInfo->pUserData = hAllocation->GetUserData();
17266  }
17267 }
17268 
17269 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
17270 {
17271  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
17272  if(hAllocation->CanBecomeLost())
17273  {
17274  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17275  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17276  for(;;)
17277  {
17278  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
17279  {
17280  return false;
17281  }
17282  else if(localLastUseFrameIndex == localCurrFrameIndex)
17283  {
17284  return true;
17285  }
17286  else // Last use time earlier than current time.
17287  {
17288  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17289  {
17290  localLastUseFrameIndex = localCurrFrameIndex;
17291  }
17292  }
17293  }
17294  }
17295  else
17296  {
17297 #if VMA_STATS_STRING_ENABLED
17298  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
17299  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
17300  for(;;)
17301  {
17302  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
17303  if(localLastUseFrameIndex == localCurrFrameIndex)
17304  {
17305  break;
17306  }
17307  else // Last use time earlier than current time.
17308  {
17309  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
17310  {
17311  localLastUseFrameIndex = localCurrFrameIndex;
17312  }
17313  }
17314  }
17315 #endif
17316 
17317  return true;
17318  }
17319 }
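// Usage sketch (illustrative): the canonical per-frame pattern for allocations created
// with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, which TouchAllocation() above serves:
//
//     vmaSetCurrentFrameIndex(allocator, frameIndex);
//     if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
//     {
//         // Allocation is lost: destroy the stale resource and recreate it.
//     }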
17320 
17321 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
17322 {
17323  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
17324 
17325  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
17326 
17327  // Protection against the new structure member being left uninitialized. If garbage data were left there, this pointer dereference would crash.
17328  if(pCreateInfo->pMemoryAllocateNext)
17329  {
17330  VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
17331  }
17332 
17333  if(newCreateInfo.maxBlockCount == 0)
17334  {
17335  newCreateInfo.maxBlockCount = SIZE_MAX;
17336  }
17337  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
17338  {
17339  return VK_ERROR_INITIALIZATION_FAILED;
17340  }
17341  // Memory type index out of range or forbidden.
17342  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
17343  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
17344  {
17345  return VK_ERROR_FEATURE_NOT_PRESENT;
17346  }
17347  if(newCreateInfo.minAllocationAlignment > 0)
17348  {
17349  VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
17350  }
17351 
17352  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
17353 
17354  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
17355 
17356  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
17357  if(res != VK_SUCCESS)
17358  {
17359  vma_delete(this, *pPool);
17360  *pPool = VMA_NULL;
17361  return res;
17362  }
17363 
17364  // Add to m_Pools.
17365  {
17366  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17367  (*pPool)->SetId(m_NextPoolId++);
17368  m_Pools.PushBack(*pPool);
17369  }
17370 
17371  return VK_SUCCESS;
17372 }
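// Usage sketch (illustrative values): creating a custom pool that ends up in m_Pools above.
// The memoryTypeIndex would normally come from vmaFindMemoryTypeIndex*():
//
//     VmaPoolCreateInfo poolCreateInfo = {};
//     poolCreateInfo.memoryTypeIndex = memTypeIndex;
//     poolCreateInfo.blockSize = 16ull * 1024 * 1024; // 0 would mean the default preferred size
//     poolCreateInfo.minBlockCount = 1;
//     poolCreateInfo.maxBlockCount = 0; // 0 becomes SIZE_MAX, i.e. unlimited
//
//     VmaPool pool;
//     VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);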
17373 
17374 void VmaAllocator_T::DestroyPool(VmaPool pool)
17375 {
17376  // Remove from m_Pools.
17377  {
17378  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17379  m_Pools.Remove(pool);
17380  }
17381 
17382  vma_delete(this, pool);
17383 }
17384 
17385 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
17386 {
17387  pool->m_BlockVector.GetPoolStats(pPoolStats);
17388 }
17389 
17390 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
17391 {
17392  m_CurrentFrameIndex.store(frameIndex);
17393 
17394 #if VMA_MEMORY_BUDGET
17395  if(m_UseExtMemoryBudget)
17396  {
17397  UpdateVulkanBudget();
17398  }
17399 #endif // #if VMA_MEMORY_BUDGET
17400 }
17401 
17402 void VmaAllocator_T::MakePoolAllocationsLost(
17403  VmaPool hPool,
17404  size_t* pLostAllocationCount)
17405 {
17406  hPool->m_BlockVector.MakePoolAllocationsLost(
17407  m_CurrentFrameIndex.load(),
17408  pLostAllocationCount);
17409 }
17410 
17411 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
17412 {
17413  return hPool->m_BlockVector.CheckCorruption();
17414 }
17415 
17416 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
17417 {
17418  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
17419 
17420  // Process default pools.
17421  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17422  {
17423  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
17424  {
17425  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17426  VMA_ASSERT(pBlockVector);
17427  VkResult localRes = pBlockVector->CheckCorruption();
17428  switch(localRes)
17429  {
17430  case VK_ERROR_FEATURE_NOT_PRESENT:
17431  break;
17432  case VK_SUCCESS:
17433  finalRes = VK_SUCCESS;
17434  break;
17435  default:
17436  return localRes;
17437  }
17438  }
17439  }
17440 
17441  // Process custom pools.
17442  {
17443  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17444  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
17445  {
17446  if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
17447  {
17448  VkResult localRes = pool->m_BlockVector.CheckCorruption();
17449  switch(localRes)
17450  {
17451  case VK_ERROR_FEATURE_NOT_PRESENT:
17452  break;
17453  case VK_SUCCESS:
17454  finalRes = VK_SUCCESS;
17455  break;
17456  default:
17457  return localRes;
17458  }
17459  }
17460  }
17461  }
17462 
17463  return finalRes;
17464 }
17465 
17466 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
17467 {
17468  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
17469  (*pAllocation)->InitLost();
17470 }
17471 
17472 // An object that increments the given atomic but decrements it back in the destructor, unless Commit() is called.
17473 template<typename T>
17474 struct AtomicTransactionalIncrement
17475 {
17476 public:
17477  typedef std::atomic<T> AtomicT;
17478  ~AtomicTransactionalIncrement()
17479  {
17480  if(m_Atomic)
17481  --(*m_Atomic);
17482  }
17483  T Increment(AtomicT* atomic)
17484  {
17485  m_Atomic = atomic;
17486  return m_Atomic->fetch_add(1);
17487  }
17488  void Commit()
17489  {
17490  m_Atomic = nullptr;
17491  }
17492 
17493 private:
17494  AtomicT* m_Atomic = nullptr;
17495 };
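// Intended usage pattern of this helper (as in AllocateVulkanMemory() below): increment
// first, validate, and Commit() only once the guarded operation has succeeded, so any
// early return rolls the counter back automatically. A minimal sketch:
//
//     AtomicTransactionalIncrement<uint32_t> increment;
//     if(increment.Increment(&counter) >= limit)
//         return VK_ERROR_TOO_MANY_OBJECTS; // destructor decrements `counter`
//     // ... work that may fail ...
//     increment.Commit(); // keep the increment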
17496 
17497 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
17498 {
17499  AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
17500  const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
17501 #if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
17502  if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
17503  {
17504  return VK_ERROR_TOO_MANY_OBJECTS;
17505  }
17506 #endif
17507 
17508  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
17509 
17510  // HeapSizeLimit is in effect for this heap.
17511  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
17512  {
17513  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
17514  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
17515  for(;;)
17516  {
17517  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
17518  if(blockBytesAfterAllocation > heapSize)
17519  {
17520  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17521  }
17522  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
17523  {
17524  break;
17525  }
17526  }
17527  }
17528  else
17529  {
17530  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
17531  }
17532 
17533  // VULKAN CALL vkAllocateMemory.
17534  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
17535 
17536  if(res == VK_SUCCESS)
17537  {
17538 #if VMA_MEMORY_BUDGET
17539  ++m_Budget.m_OperationsSinceBudgetFetch;
17540 #endif
17541 
17542  // Informative callback.
17543  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
17544  {
17545  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
17546  }
17547 
17548  deviceMemoryCountIncrement.Commit();
17549  }
17550  else
17551  {
17552  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
17553  }
17554 
17555  return res;
17556 }
17557 
17558 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
17559 {
17560  // Informative callback.
17561  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
17562  {
17563  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
17564  }
17565 
17566  // VULKAN CALL vkFreeMemory.
17567  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
17568 
17569  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
17570 
17571  --m_DeviceMemoryCount;
17572 }
17573 
17574 VkResult VmaAllocator_T::BindVulkanBuffer(
17575  VkDeviceMemory memory,
17576  VkDeviceSize memoryOffset,
17577  VkBuffer buffer,
17578  const void* pNext)
17579 {
17580  if(pNext != VMA_NULL)
17581  {
17582 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17583  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17584  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
17585  {
17586  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
17587  bindBufferMemoryInfo.pNext = pNext;
17588  bindBufferMemoryInfo.buffer = buffer;
17589  bindBufferMemoryInfo.memory = memory;
17590  bindBufferMemoryInfo.memoryOffset = memoryOffset;
17591  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
17592  }
17593  else
17594 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17595  {
17596  return VK_ERROR_EXTENSION_NOT_PRESENT;
17597  }
17598  }
17599  else
17600  {
17601  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
17602  }
17603 }
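// Usage sketch (illustrative): forwarding a pNext chain to vkBindBufferMemory2 as handled
// above. The extension struct is only an example of something a caller might chain:
//
//     VkBindBufferMemoryDeviceGroupInfo deviceGroupInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO };
//     // ... fill deviceGroupInfo.deviceIndexCount / pDeviceIndices ...
//     vmaBindBufferMemory2(allocator, alloc, 0 /*allocationLocalOffset*/, buf, &deviceGroupInfo);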
17604 
17605 VkResult VmaAllocator_T::BindVulkanImage(
17606  VkDeviceMemory memory,
17607  VkDeviceSize memoryOffset,
17608  VkImage image,
17609  const void* pNext)
17610 {
17611  if(pNext != VMA_NULL)
17612  {
17613 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17614  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17615  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
17616  {
17617  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
17618  bindImageMemoryInfo.pNext = pNext;
17619  bindImageMemoryInfo.image = image;
17620  bindImageMemoryInfo.memory = memory;
17621  bindImageMemoryInfo.memoryOffset = memoryOffset;
17622  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
17623  }
17624  else
17625 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17626  {
17627  return VK_ERROR_EXTENSION_NOT_PRESENT;
17628  }
17629  }
17630  else
17631  {
17632  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17633  }
17634 }
17635 
17636 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17637 {
17638  if(hAllocation->CanBecomeLost())
17639  {
17640  return VK_ERROR_MEMORY_MAP_FAILED;
17641  }
17642 
17643  switch(hAllocation->GetType())
17644  {
17645  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17646  {
17647  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17648  char *pBytes = VMA_NULL;
17649  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17650  if(res == VK_SUCCESS)
17651  {
17652  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17653  hAllocation->BlockAllocMap();
17654  }
17655  return res;
17656  }
17657  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17658  return hAllocation->DedicatedAllocMap(this, ppData);
17659  default:
17660  VMA_ASSERT(0);
17661  return VK_ERROR_MEMORY_MAP_FAILED;
17662  }
17663 }
17664 
17665 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17666 {
17667  switch(hAllocation->GetType())
17668  {
17669  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17670  {
17671  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17672  hAllocation->BlockAllocUnmap();
17673  pBlock->Unmap(this, 1);
17674  }
17675  break;
17676  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17677  hAllocation->DedicatedAllocUnmap(this);
17678  break;
17679  default:
17680  VMA_ASSERT(0);
17681  }
17682 }
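// Usage sketch (illustrative): the reference-counted Map()/Unmap() above back the public pair:
//
//     void* pData = VMA_NULL;
//     if(vmaMapMemory(allocator, alloc, &pData) == VK_SUCCESS)
//     {
//         memcpy(pData, srcData, srcSize);
//         vmaUnmapMemory(allocator, alloc);
//     }
//     // Map/unmap calls may nest; the underlying vkMapMemory happens only on the first Map().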
17683 
17684 VkResult VmaAllocator_T::BindBufferMemory(
17685  VmaAllocation hAllocation,
17686  VkDeviceSize allocationLocalOffset,
17687  VkBuffer hBuffer,
17688  const void* pNext)
17689 {
17690  VkResult res = VK_SUCCESS;
17691  switch(hAllocation->GetType())
17692  {
17693  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17694  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17695  break;
17696  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17697  {
17698  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17699  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17700  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17701  break;
17702  }
17703  default:
17704  VMA_ASSERT(0);
17705  }
17706  return res;
17707 }
17708 
17709 VkResult VmaAllocator_T::BindImageMemory(
17710  VmaAllocation hAllocation,
17711  VkDeviceSize allocationLocalOffset,
17712  VkImage hImage,
17713  const void* pNext)
17714 {
17715  VkResult res = VK_SUCCESS;
17716  switch(hAllocation->GetType())
17717  {
17718  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17719  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17720  break;
17721  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17722  {
17723  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17724  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17725  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17726  break;
17727  }
17728  default:
17729  VMA_ASSERT(0);
17730  }
17731  return res;
17732 }
17733 
17734 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17735  VmaAllocation hAllocation,
17736  VkDeviceSize offset, VkDeviceSize size,
17737  VMA_CACHE_OPERATION op)
17738 {
17739  VkResult res = VK_SUCCESS;
17740 
17741  VkMappedMemoryRange memRange = {};
17742  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17743  {
17744  switch(op)
17745  {
17746  case VMA_CACHE_FLUSH:
17747  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17748  break;
17749  case VMA_CACHE_INVALIDATE:
17750  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17751  break;
17752  default:
17753  VMA_ASSERT(0);
17754  }
17755  }
17756  // else: Just ignore this call.
17757  return res;
17758 }
17759 
17760 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17761  uint32_t allocationCount,
17762  const VmaAllocation* allocations,
17763  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17764  VMA_CACHE_OPERATION op)
17765 {
17766  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17767  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17768  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17769 
17770  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17771  {
17772  const VmaAllocation alloc = allocations[allocIndex];
17773  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17774  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17775  VkMappedMemoryRange newRange;
17776  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17777  {
17778  ranges.push_back(newRange);
17779  }
17780  }
17781 
17782  VkResult res = VK_SUCCESS;
17783  if(!ranges.empty())
17784  {
17785  switch(op)
17786  {
17787  case VMA_CACHE_FLUSH:
17788  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17789  break;
17790  case VMA_CACHE_INVALIDATE:
17791  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17792  break;
17793  default:
17794  VMA_ASSERT(0);
17795  }
17796  }
17797  // else: Just ignore this call.
17798  return res;
17799 }
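// Usage sketch (illustrative): after writing to a mapped HOST_VISIBLE but non-HOST_COHERENT
// allocation, flush it. On coherent memory GetFlushOrInvalidateRange() returns false and
// the call above degrades to a no-op:
//
//     vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);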
17800 
17801 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17802 {
17803  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17804 
17805  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17806  {
17807  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17808  DedicatedAllocationLinkedList& dedicatedAllocations = m_DedicatedAllocations[memTypeIndex];
17809  dedicatedAllocations.Remove(allocation);
17810  }
17811 
17812  VkDeviceMemory hMemory = allocation->GetMemory();
17813 
17814  /*
17815  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
17816  before vkFreeMemory.
17817 
17818  if(allocation->GetMappedData() != VMA_NULL)
17819  {
17820  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17821  }
17822  */
17823 
17824  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17825 
17826  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17827 }
17828 
17829 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17830 {
17831  VkBufferCreateInfo dummyBufCreateInfo;
17832  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17833 
17834  uint32_t memoryTypeBits = 0;
17835 
17836  // Create buffer.
17837  VkBuffer buf = VK_NULL_HANDLE;
17838  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17839  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17840  if(res == VK_SUCCESS)
17841  {
17842  // Query for supported memory types.
17843  VkMemoryRequirements memReq;
17844  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17845  memoryTypeBits = memReq.memoryTypeBits;
17846 
17847  // Destroy buffer.
17848  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17849  }
17850 
17851  return memoryTypeBits;
17852 }
17853 
17854 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17855 {
17856  // Make sure memory information is already fetched.
17857  VMA_ASSERT(GetMemoryTypeCount() > 0);
17858 
17859  uint32_t memoryTypeBits = UINT32_MAX;
17860 
17861  if(!m_UseAmdDeviceCoherentMemory)
17862  {
17863  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17864  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17865  {
17866  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17867  {
17868  memoryTypeBits &= ~(1u << memTypeIndex);
17869  }
17870  }
17871  }
17872 
17873  return memoryTypeBits;
17874 }
17875 
17876 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17877  VmaAllocation allocation,
17878  VkDeviceSize offset, VkDeviceSize size,
17879  VkMappedMemoryRange& outRange) const
17880 {
17881  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17882  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17883  {
17884  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17885  const VkDeviceSize allocationSize = allocation->GetSize();
17886  VMA_ASSERT(offset <= allocationSize);
17887 
17888  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17889  outRange.pNext = VMA_NULL;
17890  outRange.memory = allocation->GetMemory();
17891 
17892  switch(allocation->GetType())
17893  {
17894  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17895  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17896  if(size == VK_WHOLE_SIZE)
17897  {
17898  outRange.size = allocationSize - outRange.offset;
17899  }
17900  else
17901  {
17902  VMA_ASSERT(offset + size <= allocationSize);
17903  outRange.size = VMA_MIN(
17904  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17905  allocationSize - outRange.offset);
17906  }
17907  break;
17908  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17909  {
17910  // 1. Still within this allocation.
17911  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17912  if(size == VK_WHOLE_SIZE)
17913  {
17914  size = allocationSize - offset;
17915  }
17916  else
17917  {
17918  VMA_ASSERT(offset + size <= allocationSize);
17919  }
17920  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17921 
17922  // 2. Adjust to whole block.
17923  const VkDeviceSize allocationOffset = allocation->GetOffset();
17924  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17925  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17926  outRange.offset += allocationOffset;
17927  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17928 
17929  break;
17930  }
17931  default:
17932  VMA_ASSERT(0);
17933  }
17934  return true;
17935  }
17936  return false;
17937 }
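// Worked example of the alignment above (illustrative values): with nonCoherentAtomSize = 64,
// offset = 100 and size = 8, the resulting range is
// outRange.offset = VmaAlignDown(100, 64) = 64 and
// outRange.size = VmaAlignUp(8 + (100 - 64), 64) = 64,
// then clamped so it never extends past the end of the allocation (or its block).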
17938 
17939 #if VMA_MEMORY_BUDGET
17940 
17941 void VmaAllocator_T::UpdateVulkanBudget()
17942 {
17943  VMA_ASSERT(m_UseExtMemoryBudget);
17944 
17945  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
17946 
17947  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
17948  VmaPnextChainPushFront(&memProps, &budgetProps);
17949 
17950  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
17951 
17952  {
17953  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
17954 
17955  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
17956  {
17957  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
17958  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
17959  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
17960 
17961  // Some buggy drivers return the budget incorrectly, e.g. 0 or much bigger than the heap size.
17962  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
17963  {
17964  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17965  }
17966  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
17967  {
17968  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
17969  }
17970  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
17971  {
17972  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17973  }
17974  }
17975  m_Budget.m_OperationsSinceBudgetFetch = 0;
17976  }
17977 }
17978 
17979 #endif // #if VMA_MEMORY_BUDGET
17980 
17981 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
17982 {
17983  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
17984  !hAllocation->CanBecomeLost() &&
17985  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17986  {
17987  void* pData = VMA_NULL;
17988  VkResult res = Map(hAllocation, &pData);
17989  if(res == VK_SUCCESS)
17990  {
17991  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
17992  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
17993  Unmap(hAllocation);
17994  }
17995  else
17996  {
17997  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
17998  }
17999  }
18000 }
18001 
18002 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
18003 {
18004  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
18005  if(memoryTypeBits == UINT32_MAX)
18006  {
18007  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
18008  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
18009  }
18010  return memoryTypeBits;
18011 }
18012 
18013 #if VMA_STATS_STRING_ENABLED
18014 
18015 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
18016 {
18017  bool dedicatedAllocationsStarted = false;
18018  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
18019  {
18020  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
18021  DedicatedAllocationLinkedList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
18022  if(!dedicatedAllocList.IsEmpty())
18023  {
18024  if(dedicatedAllocationsStarted == false)
18025  {
18026  dedicatedAllocationsStarted = true;
18027  json.WriteString("DedicatedAllocations");
18028  json.BeginObject();
18029  }
18030 
18031  json.BeginString("Type ");
18032  json.ContinueString(memTypeIndex);
18033  json.EndString();
18034 
18035  json.BeginArray();
18036 
18037  for(VmaAllocation alloc = dedicatedAllocList.Front();
18038  alloc != VMA_NULL; alloc = dedicatedAllocList.GetNext(alloc))
18039  {
18040  json.BeginObject(true);
18041  alloc->PrintParameters(json);
18042  json.EndObject();
18043  }
18044 
18045  json.EndArray();
18046  }
18047  }
18048  if(dedicatedAllocationsStarted)
18049  {
18050  json.EndObject();
18051  }
18052 
18053  {
18054  bool allocationsStarted = false;
18055  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
18056  {
18057  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
18058  {
18059  if(allocationsStarted == false)
18060  {
18061  allocationsStarted = true;
18062  json.WriteString("DefaultPools");
18063  json.BeginObject();
18064  }
18065 
18066  json.BeginString("Type ");
18067  json.ContinueString(memTypeIndex);
18068  json.EndString();
18069 
18070  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
18071  }
18072  }
18073  if(allocationsStarted)
18074  {
18075  json.EndObject();
18076  }
18077  }
18078 
18079  // Custom pools
18080  {
18081  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
18082  if(!m_Pools.IsEmpty())
18083  {
18084  json.WriteString("Pools");
18085  json.BeginObject();
18086  for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
18087  {
18088  json.BeginString();
18089  json.ContinueString(pool->GetId());
18090  json.EndString();
18091 
18092  pool->m_BlockVector.PrintDetailedMap(json);
18093  }
18094  json.EndObject();
18095  }
18096  }
18097 }
18098 
18099 #endif // #if VMA_STATS_STRING_ENABLED
18100 
18101 ////////////////////////////////////////////////////////////////////////////////
18102 // Public interface
18103 
18104 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
18105  const VmaAllocatorCreateInfo* pCreateInfo,
18106  VmaAllocator* pAllocator)
18107 {
18108  VMA_ASSERT(pCreateInfo && pAllocator);
18109  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
18110  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
18111  VMA_DEBUG_LOG("vmaCreateAllocator");
18112  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
18113  return (*pAllocator)->Init(pCreateInfo);
18114 }
18115 
18116 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
18117  VmaAllocator allocator)
18118 {
18119  if(allocator != VK_NULL_HANDLE)
18120  {
18121  VMA_DEBUG_LOG("vmaDestroyAllocator");
18122  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
18123  vma_delete(&allocationCallbacks, allocator);
18124  }
18125 }
18126 
18127 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
18128 {
18129  VMA_ASSERT(allocator && pAllocatorInfo);
18130  pAllocatorInfo->instance = allocator->m_hInstance;
18131  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
18132  pAllocatorInfo->device = allocator->m_hDevice;
18133 }
18134 
18135 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
18136  VmaAllocator allocator,
18137  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
18138 {
18139  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
18140  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
18141 }
18142 
18143 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
18144  VmaAllocator allocator,
18145  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
18146 {
18147  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
18148  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
18149 }
18150 
18151 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
18152  VmaAllocator allocator,
18153  uint32_t memoryTypeIndex,
18154  VkMemoryPropertyFlags* pFlags)
18155 {
18156  VMA_ASSERT(allocator && pFlags);
18157  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
18158  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
18159 }
18160 
18161 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
18162  VmaAllocator allocator,
18163  uint32_t frameIndex)
18164 {
18165  VMA_ASSERT(allocator);
18166  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
18167 
18168  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18169 
18170  allocator->SetCurrentFrameIndex(frameIndex);
18171 }
18172 
18173 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
18174  VmaAllocator allocator,
18175  VmaStats* pStats)
18176 {
18177  VMA_ASSERT(allocator && pStats);
18178  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18179  allocator->CalculateStats(pStats);
18180 }
18181 
18182 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
18183  VmaAllocator allocator,
18184  VmaBudget* pBudget)
18185 {
18186  VMA_ASSERT(allocator && pBudget);
18187  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18188  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
18189 }
18190 
18191 #if VMA_STATS_STRING_ENABLED
18192 
18193 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
18194  VmaAllocator allocator,
18195  char** ppStatsString,
18196  VkBool32 detailedMap)
18197 {
18198  VMA_ASSERT(allocator && ppStatsString);
18199  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18200 
18201  VmaStringBuilder sb(allocator);
18202  {
18203  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
18204  json.BeginObject();
18205 
18206  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
18207  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
18208 
18209  VmaStats stats;
18210  allocator->CalculateStats(&stats);
18211 
18212  json.WriteString("Total");
18213  VmaPrintStatInfo(json, stats.total);
18214 
18215  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
18216  {
18217  json.BeginString("Heap ");
18218  json.ContinueString(heapIndex);
18219  json.EndString();
18220  json.BeginObject();
18221 
18222  json.WriteString("Size");
18223  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
18224 
18225  json.WriteString("Flags");
18226  json.BeginArray(true);
18227  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
18228  {
18229  json.WriteString("DEVICE_LOCAL");
18230  }
18231  json.EndArray();
18232 
18233  json.WriteString("Budget");
18234  json.BeginObject();
18235  {
18236  json.WriteString("BlockBytes");
18237  json.WriteNumber(budget[heapIndex].blockBytes);
18238  json.WriteString("AllocationBytes");
18239  json.WriteNumber(budget[heapIndex].allocationBytes);
18240  json.WriteString("Usage");
18241  json.WriteNumber(budget[heapIndex].usage);
18242  json.WriteString("Budget");
18243  json.WriteNumber(budget[heapIndex].budget);
18244  }
18245  json.EndObject();
18246 
18247  if(stats.memoryHeap[heapIndex].blockCount > 0)
18248  {
18249  json.WriteString("Stats");
18250  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
18251  }
18252 
18253  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
18254  {
18255  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
18256  {
18257  json.BeginString("Type ");
18258  json.ContinueString(typeIndex);
18259  json.EndString();
18260 
18261  json.BeginObject();
18262 
18263  json.WriteString("Flags");
18264  json.BeginArray(true);
18265  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
18266  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
18267  {
18268  json.WriteString("DEVICE_LOCAL");
18269  }
18270  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
18271  {
18272  json.WriteString("HOST_VISIBLE");
18273  }
18274  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
18275  {
18276  json.WriteString("HOST_COHERENT");
18277  }
18278  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
18279  {
18280  json.WriteString("HOST_CACHED");
18281  }
18282  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
18283  {
18284  json.WriteString("LAZILY_ALLOCATED");
18285  }
18286 #if VMA_VULKAN_VERSION >= 1001000
18287  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
18288  {
18289  json.WriteString("PROTECTED");
18290  }
18291 #endif // #if VMA_VULKAN_VERSION >= 1001000
18292 #if VK_AMD_device_coherent_memory
18293  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
18294  {
18295  json.WriteString("DEVICE_COHERENT");
18296  }
18297  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
18298  {
18299  json.WriteString("DEVICE_UNCACHED");
18300  }
18301 #endif // #if VK_AMD_device_coherent_memory
18302  json.EndArray();
18303 
18304  if(stats.memoryType[typeIndex].blockCount > 0)
18305  {
18306  json.WriteString("Stats");
18307  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
18308  }
18309 
18310  json.EndObject();
18311  }
18312  }
18313 
18314  json.EndObject();
18315  }
18316  if(detailedMap == VK_TRUE)
18317  {
18318  allocator->PrintDetailedMap(json);
18319  }
18320 
18321  json.EndObject();
18322  }
18323 
18324  const size_t len = sb.GetLength();
18325  char* const pChars = vma_new_array(allocator, char, len + 1);
18326  if(len > 0)
18327  {
18328  memcpy(pChars, sb.GetData(), len);
18329  }
18330  pChars[len] = '\0';
18331  *ppStatsString = pChars;
18332 }
18333 
18334 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
18335  VmaAllocator allocator,
18336  char* pStatsString)
18337 {
18338  if(pStatsString != VMA_NULL)
18339  {
18340  VMA_ASSERT(allocator);
18341  size_t len = strlen(pStatsString);
18342  vma_delete_array(allocator, pStatsString, len + 1);
18343  }
18344 }
18345 
18346 #endif // #if VMA_STATS_STRING_ENABLED
18347 
18348 /*
18349 This function is not protected by any mutex because it just reads immutable data.
18350 */
18351 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
18352  VmaAllocator allocator,
18353  uint32_t memoryTypeBits,
18354  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18355  uint32_t* pMemoryTypeIndex)
18356 {
18357  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18358  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18359  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18360 
18361  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
18362 
18363  if(pAllocationCreateInfo->memoryTypeBits != 0)
18364  {
18365  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
18366  }
18367 
18368  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
18369  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
18370  uint32_t notPreferredFlags = 0;
18371 
18372  // Convert usage to requiredFlags and preferredFlags.
18373  switch(pAllocationCreateInfo->usage)
18374  {
18375  case VMA_MEMORY_USAGE_UNKNOWN:
18376  break;
18377  case VMA_MEMORY_USAGE_GPU_ONLY:
18378  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18379  {
18380  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18381  }
18382  break;
18383  case VMA_MEMORY_USAGE_CPU_ONLY:
18384  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
18385  break;
18386  case VMA_MEMORY_USAGE_CPU_TO_GPU:
18387  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18388  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
18389  {
18390  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18391  }
18392  break;
18393  case VMA_MEMORY_USAGE_GPU_TO_CPU:
18394  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
18395  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
18396  break;
18397  case VMA_MEMORY_USAGE_CPU_COPY:
18398  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
18399  break;
18400  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
18401  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
18402  break;
18403  default:
18404  VMA_ASSERT(0);
18405  break;
18406  }
18407 
18408  // Avoid DEVICE_COHERENT unless explicitly requested.
18409  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
18410  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
18411  {
18412  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
18413  }
18414 
18415  *pMemoryTypeIndex = UINT32_MAX;
18416  uint32_t minCost = UINT32_MAX;
18417  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
18418  memTypeIndex < allocator->GetMemoryTypeCount();
18419  ++memTypeIndex, memTypeBit <<= 1)
18420  {
18421  // This memory type is acceptable according to the memoryTypeBits bitmask.
18422  if((memTypeBit & memoryTypeBits) != 0)
18423  {
18424  const VkMemoryPropertyFlags currFlags =
18425  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
18426  // This memory type contains requiredFlags.
18427  if((requiredFlags & ~currFlags) == 0)
18428  {
18429  // Calculate cost as number of bits from preferredFlags not present in this memory type.
18430  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
18431  VmaCountBitsSet(currFlags & notPreferredFlags);
18432  // Remember memory type with lowest cost.
18433  if(currCost < minCost)
18434  {
18435  *pMemoryTypeIndex = memTypeIndex;
18436  if(currCost == 0)
18437  {
18438  return VK_SUCCESS;
18439  }
18440  minCost = currCost;
18441  }
18442  }
18443  }
18444  }
18445  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
18446 }
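// Usage sketch (illustrative): picking a memory type for an upload buffer with the
// cost-ranking loop above. memoryTypeBits would normally come from vkGet*MemoryRequirements;
// UINT32_MAX means "any type allowed":
//
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
//     uint32_t memTypeIndex;
//     VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);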
18447 
18448 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
18449  VmaAllocator allocator,
18450  const VkBufferCreateInfo* pBufferCreateInfo,
18451  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18452  uint32_t* pMemoryTypeIndex)
18453 {
18454  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18455  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
18456  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18457  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18458 
18459  const VkDevice hDev = allocator->m_hDevice;
18460  VkBuffer hBuffer = VK_NULL_HANDLE;
18461  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
18462  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
18463  if(res == VK_SUCCESS)
18464  {
18465  VkMemoryRequirements memReq = {};
18466  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
18467  hDev, hBuffer, &memReq);
18468 
18469  res = vmaFindMemoryTypeIndex(
18470  allocator,
18471  memReq.memoryTypeBits,
18472  pAllocationCreateInfo,
18473  pMemoryTypeIndex);
18474 
18475  allocator->GetVulkanFunctions().vkDestroyBuffer(
18476  hDev, hBuffer, allocator->GetAllocationCallbacks());
18477  }
18478  return res;
18479 }
18480 
18481 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
18482  VmaAllocator allocator,
18483  const VkImageCreateInfo* pImageCreateInfo,
18484  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18485  uint32_t* pMemoryTypeIndex)
18486 {
18487  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18488  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
18489  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18490  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18491 
18492  const VkDevice hDev = allocator->m_hDevice;
18493  VkImage hImage = VK_NULL_HANDLE;
18494  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
18495  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
18496  if(res == VK_SUCCESS)
18497  {
18498  VkMemoryRequirements memReq = {};
18499  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
18500  hDev, hImage, &memReq);
18501 
18502  res = vmaFindMemoryTypeIndex(
18503  allocator,
18504  memReq.memoryTypeBits,
18505  pAllocationCreateInfo,
18506  pMemoryTypeIndex);
18507 
18508  allocator->GetVulkanFunctions().vkDestroyImage(
18509  hDev, hImage, allocator->GetAllocationCallbacks());
18510  }
18511  return res;
18512 }
18513 
18514 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
18515  VmaAllocator allocator,
18516  const VmaPoolCreateInfo* pCreateInfo,
18517  VmaPool* pPool)
18518 {
18519  VMA_ASSERT(allocator && pCreateInfo && pPool);
18520 
18521  VMA_DEBUG_LOG("vmaCreatePool");
18522 
18523  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18524 
18525  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
18526 
18527 #if VMA_RECORDING_ENABLED
18528  if(allocator->GetRecorder() != VMA_NULL)
18529  {
18530  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
18531  }
18532 #endif
18533 
18534  return res;
18535 }
18536 
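/*
Usage sketch: creating and destroying a custom pool. Illustrative; assumes a valid
`allocator` and a `memTypeIndex` obtained e.g. from vmaFindMemoryTypeIndexForBufferInfo().

\code
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 16ull * 1024 * 1024; // Optional: fixed 16 MiB blocks.
poolCreateInfo.maxBlockCount = 4;               // Optional: limit pool growth.

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// Allocate from it by setting VmaAllocationCreateInfo::pool, then eventually:
vmaDestroyPool(allocator, pool);
\endcode
*/
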
18537 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
18538  VmaAllocator allocator,
18539  VmaPool pool)
18540 {
18541  VMA_ASSERT(allocator);
18542 
18543  if(pool == VK_NULL_HANDLE)
18544  {
18545  return;
18546  }
18547 
18548  VMA_DEBUG_LOG("vmaDestroyPool");
18549 
18550  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18551 
18552 #if VMA_RECORDING_ENABLED
18553  if(allocator->GetRecorder() != VMA_NULL)
18554  {
18555  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
18556  }
18557 #endif
18558 
18559  allocator->DestroyPool(pool);
18560 }
18561 
18562 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
18563  VmaAllocator allocator,
18564  VmaPool pool,
18565  VmaPoolStats* pPoolStats)
18566 {
18567  VMA_ASSERT(allocator && pool && pPoolStats);
18568 
18569  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18570 
18571  allocator->GetPoolStats(pool, pPoolStats);
18572 }
18573 
18574 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
18575  VmaAllocator allocator,
18576  VmaPool pool,
18577  size_t* pLostAllocationCount)
18578 {
18579  VMA_ASSERT(allocator && pool);
18580 
18581  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18582 
18583 #if VMA_RECORDING_ENABLED
18584  if(allocator->GetRecorder() != VMA_NULL)
18585  {
18586  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
18587  }
18588 #endif
18589 
18590  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18591 }
18592 
18593 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
18594 {
18595  VMA_ASSERT(allocator && pool);
18596 
18597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18598 
18599  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
18600 
18601  return allocator->CheckPoolCorruption(pool);
18602 }
18603 
18604 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
18605  VmaAllocator allocator,
18606  VmaPool pool,
18607  const char** ppName)
18608 {
18609  VMA_ASSERT(allocator && pool && ppName);
18610 
18611  VMA_DEBUG_LOG("vmaGetPoolName");
18612 
18613  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18614 
18615  *ppName = pool->GetName();
18616 }
18617 
18618 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
18619  VmaAllocator allocator,
18620  VmaPool pool,
18621  const char* pName)
18622 {
18623  VMA_ASSERT(allocator && pool);
18624 
18625  VMA_DEBUG_LOG("vmaSetPoolName");
18626 
18627  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18628 
18629  pool->SetName(pName);
18630 
18631 #if VMA_RECORDING_ENABLED
18632  if(allocator->GetRecorder() != VMA_NULL)
18633  {
18634  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18635  }
18636 #endif
18637 }
18638 
18639 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18640  VmaAllocator allocator,
18641  const VkMemoryRequirements* pVkMemoryRequirements,
18642  const VmaAllocationCreateInfo* pCreateInfo,
18643  VmaAllocation* pAllocation,
18644  VmaAllocationInfo* pAllocationInfo)
18645 {
18646  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18647 
18648  VMA_DEBUG_LOG("vmaAllocateMemory");
18649 
18650  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18651 
18652  VkResult result = allocator->AllocateMemory(
18653  *pVkMemoryRequirements,
18654  false, // requiresDedicatedAllocation
18655  false, // prefersDedicatedAllocation
18656  VK_NULL_HANDLE, // dedicatedBuffer
18657  UINT32_MAX, // dedicatedBufferUsage
18658  VK_NULL_HANDLE, // dedicatedImage
18659  *pCreateInfo,
18660  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18661  1, // allocationCount
18662  pAllocation);
18663 
18664 #if VMA_RECORDING_ENABLED
18665  if(allocator->GetRecorder() != VMA_NULL)
18666  {
18667  allocator->GetRecorder()->RecordAllocateMemory(
18668  allocator->GetCurrentFrameIndex(),
18669  *pVkMemoryRequirements,
18670  *pCreateInfo,
18671  *pAllocation);
18672  }
18673 #endif
18674 
18675  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18676  {
18677  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18678  }
18679 
18680  return result;
18681 }
18682 
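/*
Usage sketch: allocating raw device memory for externally queried requirements.
Illustrative; assumes valid `allocator`, `device` and `buffer` handles.

\code
VkMemoryRequirements memReq;
vkGetBufferMemoryRequirements(device, buffer, &memReq);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VK_NULL_HANDLE;
VmaAllocationInfo allocInfo;
VkResult res = vmaAllocateMemory(allocator, &memReq, &allocCreateInfo, &alloc, &allocInfo);
// Bind manually afterwards, e.g. with vmaBindBufferMemory(allocator, alloc, buffer).
\endcode
*/
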
18683 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18684  VmaAllocator allocator,
18685  const VkMemoryRequirements* pVkMemoryRequirements,
18686  const VmaAllocationCreateInfo* pCreateInfo,
18687  size_t allocationCount,
18688  VmaAllocation* pAllocations,
18689  VmaAllocationInfo* pAllocationInfo)
18690 {
18691  if(allocationCount == 0)
18692  {
18693  return VK_SUCCESS;
18694  }
18695 
18696  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18697 
18698  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18699 
18700  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18701 
18702  VkResult result = allocator->AllocateMemory(
18703  *pVkMemoryRequirements,
18704  false, // requiresDedicatedAllocation
18705  false, // prefersDedicatedAllocation
18706  VK_NULL_HANDLE, // dedicatedBuffer
18707  UINT32_MAX, // dedicatedBufferUsage
18708  VK_NULL_HANDLE, // dedicatedImage
18709  *pCreateInfo,
18710  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18711  allocationCount,
18712  pAllocations);
18713 
18714 #if VMA_RECORDING_ENABLED
18715  if(allocator->GetRecorder() != VMA_NULL)
18716  {
18717  allocator->GetRecorder()->RecordAllocateMemoryPages(
18718  allocator->GetCurrentFrameIndex(),
18719  *pVkMemoryRequirements,
18720  *pCreateInfo,
18721  (uint64_t)allocationCount,
18722  pAllocations);
18723  }
18724 #endif
18725 
18726  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18727  {
18728  for(size_t i = 0; i < allocationCount; ++i)
18729  {
18730  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18731  }
18732  }
18733 
18734  return result;
18735 }
18736 
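/*
Usage sketch: batch-allocating several identical allocations at once and freeing
them together. Illustrative; `memReq` is assumed to be filled from
vkGet*MemoryRequirements for the intended resources.

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

VmaAllocation allocs[8] = {};
VmaAllocationInfo allocInfos[8] = {};
VkResult res = vmaAllocateMemoryPages(
    allocator, &memReq, &allocCreateInfo, 8, allocs, allocInfos);
if(res == VK_SUCCESS)
{
    // ... bind and use the allocations ...
    vmaFreeMemoryPages(allocator, 8, allocs);
}
\endcode
*/
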
18737 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18738  VmaAllocator allocator,
18739  VkBuffer buffer,
18740  const VmaAllocationCreateInfo* pCreateInfo,
18741  VmaAllocation* pAllocation,
18742  VmaAllocationInfo* pAllocationInfo)
18743 {
18744  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18745 
18746  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18747 
18748  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18749 
18750  VkMemoryRequirements vkMemReq = {};
18751  bool requiresDedicatedAllocation = false;
18752  bool prefersDedicatedAllocation = false;
18753  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18754  requiresDedicatedAllocation,
18755  prefersDedicatedAllocation);
18756 
18757  VkResult result = allocator->AllocateMemory(
18758  vkMemReq,
18759  requiresDedicatedAllocation,
18760  prefersDedicatedAllocation,
18761  buffer, // dedicatedBuffer
18762  UINT32_MAX, // dedicatedBufferUsage
18763  VK_NULL_HANDLE, // dedicatedImage
18764  *pCreateInfo,
18765  VMA_SUBALLOCATION_TYPE_BUFFER,
18766  1, // allocationCount
18767  pAllocation);
18768 
18769 #if VMA_RECORDING_ENABLED
18770  if(allocator->GetRecorder() != VMA_NULL)
18771  {
18772  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18773  allocator->GetCurrentFrameIndex(),
18774  vkMemReq,
18775  requiresDedicatedAllocation,
18776  prefersDedicatedAllocation,
18777  *pCreateInfo,
18778  *pAllocation);
18779  }
18780 #endif
18781 
18782  if(pAllocationInfo && result == VK_SUCCESS)
18783  {
18784  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18785  }
18786 
18787  return result;
18788 }
18789 
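/*
Usage sketch: allocating memory dedicated to an existing VkBuffer and binding it.
Illustrative; assumes `buf` was created with vkCreateBuffer and `allocator` is valid.

\code
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, NULL);
if(res == VK_SUCCESS)
    res = vmaBindBufferMemory(allocator, alloc, buf);
\endcode
*/
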
18790 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18791  VmaAllocator allocator,
18792  VkImage image,
18793  const VmaAllocationCreateInfo* pCreateInfo,
18794  VmaAllocation* pAllocation,
18795  VmaAllocationInfo* pAllocationInfo)
18796 {
18797  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18798 
18799  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18800 
18801  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18802 
18803  VkMemoryRequirements vkMemReq = {};
18804  bool requiresDedicatedAllocation = false;
18805  bool prefersDedicatedAllocation = false;
18806  allocator->GetImageMemoryRequirements(image, vkMemReq,
18807  requiresDedicatedAllocation, prefersDedicatedAllocation);
18808 
18809  VkResult result = allocator->AllocateMemory(
18810  vkMemReq,
18811  requiresDedicatedAllocation,
18812  prefersDedicatedAllocation,
18813  VK_NULL_HANDLE, // dedicatedBuffer
18814  UINT32_MAX, // dedicatedBufferUsage
18815  image, // dedicatedImage
18816  *pCreateInfo,
18817  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18818  1, // allocationCount
18819  pAllocation);
18820 
18821 #if VMA_RECORDING_ENABLED
18822  if(allocator->GetRecorder() != VMA_NULL)
18823  {
18824  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18825  allocator->GetCurrentFrameIndex(),
18826  vkMemReq,
18827  requiresDedicatedAllocation,
18828  prefersDedicatedAllocation,
18829  *pCreateInfo,
18830  *pAllocation);
18831  }
18832 #endif
18833 
18834  if(pAllocationInfo && result == VK_SUCCESS)
18835  {
18836  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18837  }
18838 
18839  return result;
18840 }
18841 
18842 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18843  VmaAllocator allocator,
18844  VmaAllocation allocation)
18845 {
18846  VMA_ASSERT(allocator);
18847 
18848  if(allocation == VK_NULL_HANDLE)
18849  {
18850  return;
18851  }
18852 
18853  VMA_DEBUG_LOG("vmaFreeMemory");
18854 
18855  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18856 
18857 #if VMA_RECORDING_ENABLED
18858  if(allocator->GetRecorder() != VMA_NULL)
18859  {
18860  allocator->GetRecorder()->RecordFreeMemory(
18861  allocator->GetCurrentFrameIndex(),
18862  allocation);
18863  }
18864 #endif
18865 
18866  allocator->FreeMemory(
18867  1, // allocationCount
18868  &allocation);
18869 }
18870 
18871 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18872  VmaAllocator allocator,
18873  size_t allocationCount,
18874  const VmaAllocation* pAllocations)
18875 {
18876  if(allocationCount == 0)
18877  {
18878  return;
18879  }
18880 
18881  VMA_ASSERT(allocator);
18882 
18883  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18884 
18885  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18886 
18887 #if VMA_RECORDING_ENABLED
18888  if(allocator->GetRecorder() != VMA_NULL)
18889  {
18890  allocator->GetRecorder()->RecordFreeMemoryPages(
18891  allocator->GetCurrentFrameIndex(),
18892  (uint64_t)allocationCount,
18893  pAllocations);
18894  }
18895 #endif
18896 
18897  allocator->FreeMemory(allocationCount, pAllocations);
18898 }
18899 
18900 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18901  VmaAllocator allocator,
18902  VmaAllocation allocation,
18903  VmaAllocationInfo* pAllocationInfo)
18904 {
18905  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18906 
18907  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18908 
18909 #if VMA_RECORDING_ENABLED
18910  if(allocator->GetRecorder() != VMA_NULL)
18911  {
18912  allocator->GetRecorder()->RecordGetAllocationInfo(
18913  allocator->GetCurrentFrameIndex(),
18914  allocation);
18915  }
18916 #endif
18917 
18918  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18919 }
18920 
18921 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18922  VmaAllocator allocator,
18923  VmaAllocation allocation)
18924 {
18925  VMA_ASSERT(allocator && allocation);
18926 
18927  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18928 
18929 #if VMA_RECORDING_ENABLED
18930  if(allocator->GetRecorder() != VMA_NULL)
18931  {
18932  allocator->GetRecorder()->RecordTouchAllocation(
18933  allocator->GetCurrentFrameIndex(),
18934  allocation);
18935  }
18936 #endif
18937 
18938  return allocator->TouchAllocation(allocation);
18939 }
18940 
18941 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
18942  VmaAllocator allocator,
18943  VmaAllocation allocation,
18944  void* pUserData)
18945 {
18946  VMA_ASSERT(allocator && allocation);
18947 
18948  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18949 
18950  allocation->SetUserData(allocator, pUserData);
18951 
18952 #if VMA_RECORDING_ENABLED
18953  if(allocator->GetRecorder() != VMA_NULL)
18954  {
18955  allocator->GetRecorder()->RecordSetAllocationUserData(
18956  allocator->GetCurrentFrameIndex(),
18957  allocation,
18958  pUserData);
18959  }
18960 #endif
18961 }
18962 
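/*
Usage sketch: attaching a name string to an allocation. Illustrative; the pointer
is interpreted as a string only if the allocation was created with
VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, otherwise it is stored as-is.

\code
vmaSetAllocationUserData(allocator, alloc, (void*)"Texture: hero_albedo");

VmaAllocationInfo allocInfo;
vmaGetAllocationInfo(allocator, alloc, &allocInfo);
const char* name = (const char*)allocInfo.pUserData;
\endcode
*/
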
18963 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
18964  VmaAllocator allocator,
18965  VmaAllocation* pAllocation)
18966 {
18967  VMA_ASSERT(allocator && pAllocation);
18968 
18969  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18970 
18971  allocator->CreateLostAllocation(pAllocation);
18972 
18973 #if VMA_RECORDING_ENABLED
18974  if(allocator->GetRecorder() != VMA_NULL)
18975  {
18976  allocator->GetRecorder()->RecordCreateLostAllocation(
18977  allocator->GetCurrentFrameIndex(),
18978  *pAllocation);
18979  }
18980 #endif
18981 }
18982 
18983 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
18984  VmaAllocator allocator,
18985  VmaAllocation allocation,
18986  void** ppData)
18987 {
18988  VMA_ASSERT(allocator && allocation && ppData);
18989 
18990  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18991 
18992  VkResult res = allocator->Map(allocation, ppData);
18993 
18994 #if VMA_RECORDING_ENABLED
18995  if(allocator->GetRecorder() != VMA_NULL)
18996  {
18997  allocator->GetRecorder()->RecordMapMemory(
18998  allocator->GetCurrentFrameIndex(),
18999  allocation);
19000  }
19001 #endif
19002 
19003  return res;
19004 }
19005 
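/*
Usage sketch: mapping, writing and unmapping host-visible memory. Illustrative;
`srcData` and `dataSize` are assumed application data.

\code
void* mappedData = NULL;
VkResult res = vmaMapMemory(allocator, alloc, &mappedData);
if(res == VK_SUCCESS)
{
    memcpy(mappedData, srcData, (size_t)dataSize);
    vmaUnmapMemory(allocator, alloc);
}
\endcode
*/
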
19006 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
19007  VmaAllocator allocator,
19008  VmaAllocation allocation)
19009 {
19010  VMA_ASSERT(allocator && allocation);
19011 
19012  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19013 
19014 #if VMA_RECORDING_ENABLED
19015  if(allocator->GetRecorder() != VMA_NULL)
19016  {
19017  allocator->GetRecorder()->RecordUnmapMemory(
19018  allocator->GetCurrentFrameIndex(),
19019  allocation);
19020  }
19021 #endif
19022 
19023  allocator->Unmap(allocation);
19024 }
19025 
19026 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
19027 {
19028  VMA_ASSERT(allocator && allocation);
19029 
19030  VMA_DEBUG_LOG("vmaFlushAllocation");
19031 
19032  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19033 
19034  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
19035 
19036 #if VMA_RECORDING_ENABLED
19037  if(allocator->GetRecorder() != VMA_NULL)
19038  {
19039  allocator->GetRecorder()->RecordFlushAllocation(
19040  allocator->GetCurrentFrameIndex(),
19041  allocation, offset, size);
19042  }
19043 #endif
19044 
19045  return res;
19046 }
19047 
19048 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
19049 {
19050  VMA_ASSERT(allocator && allocation);
19051 
19052  VMA_DEBUG_LOG("vmaInvalidateAllocation");
19053 
19054  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19055 
19056  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
19057 
19058 #if VMA_RECORDING_ENABLED
19059  if(allocator->GetRecorder() != VMA_NULL)
19060  {
19061  allocator->GetRecorder()->RecordInvalidateAllocation(
19062  allocator->GetCurrentFrameIndex(),
19063  allocation, offset, size);
19064  }
19065 #endif
19066 
19067  return res;
19068 }
19069 
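/*
Usage sketch: explicit cache maintenance for memory that may lack
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; on HOST_COHERENT memory types these calls
degenerate to no-ops.

\code
// After writing through a mapped pointer, before the GPU reads:
vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
// Before reading data the GPU has written through a mapped pointer:
vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
\endcode
*/
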
19070 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
19071  VmaAllocator allocator,
19072  uint32_t allocationCount,
19073  const VmaAllocation* allocations,
19074  const VkDeviceSize* offsets,
19075  const VkDeviceSize* sizes)
19076 {
19077  VMA_ASSERT(allocator);
19078 
19079  if(allocationCount == 0)
19080  {
19081  return VK_SUCCESS;
19082  }
19083 
19084  VMA_ASSERT(allocations);
19085 
19086  VMA_DEBUG_LOG("vmaFlushAllocations");
19087 
19088  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19089 
19090  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
19091 
19092 #if VMA_RECORDING_ENABLED
19093  if(allocator->GetRecorder() != VMA_NULL)
19094  {
19095  //TODO
19096  }
19097 #endif
19098 
19099  return res;
19100 }
19101 
19102 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
19103  VmaAllocator allocator,
19104  uint32_t allocationCount,
19105  const VmaAllocation* allocations,
19106  const VkDeviceSize* offsets,
19107  const VkDeviceSize* sizes)
19108 {
19109  VMA_ASSERT(allocator);
19110 
19111  if(allocationCount == 0)
19112  {
19113  return VK_SUCCESS;
19114  }
19115 
19116  VMA_ASSERT(allocations);
19117 
19118  VMA_DEBUG_LOG("vmaInvalidateAllocations");
19119 
19120  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19121 
19122  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
19123 
19124 #if VMA_RECORDING_ENABLED
19125  if(allocator->GetRecorder() != VMA_NULL)
19126  {
19127  //TODO
19128  }
19129 #endif
19130 
19131  return res;
19132 }
19133 
19134 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
19135 {
19136  VMA_ASSERT(allocator);
19137 
19138  VMA_DEBUG_LOG("vmaCheckCorruption");
19139 
19140  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19141 
19142  return allocator->CheckCorruption(memoryTypeBits);
19143 }
19144 
19145 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
19146  VmaAllocator allocator,
19147  const VmaAllocation* pAllocations,
19148  size_t allocationCount,
19149  VkBool32* pAllocationsChanged,
19150  const VmaDefragmentationInfo *pDefragmentationInfo,
19151  VmaDefragmentationStats* pDefragmentationStats)
19152 {
19153  // Deprecated interface, reimplemented using new one.
19154 
19155  VmaDefragmentationInfo2 info2 = {};
19156  info2.allocationCount = (uint32_t)allocationCount;
19157  info2.pAllocations = pAllocations;
19158  info2.pAllocationsChanged = pAllocationsChanged;
19159  if(pDefragmentationInfo != VMA_NULL)
19160  {
19161  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
19162  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
19163  }
19164  else
19165  {
19166  info2.maxCpuAllocationsToMove = UINT32_MAX;
19167  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
19168  }
19169  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
19170 
19171  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
19172  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
19173  if(res == VK_NOT_READY)
19174  {
19175  res = vmaDefragmentationEnd(allocator, ctx);
19176  }
19177  return res;
19178 }
19179 
19180 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
19181  VmaAllocator allocator,
19182  const VmaDefragmentationInfo2* pInfo,
19183  VmaDefragmentationStats* pStats,
19184  VmaDefragmentationContext *pContext)
19185 {
19186  VMA_ASSERT(allocator && pInfo && pContext);
19187 
19188  // Degenerate case: Nothing to defragment.
19189  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
19190  {
19191  return VK_SUCCESS;
19192  }
19193 
19194  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
19195  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
19196  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
19197  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
19198 
19199  VMA_DEBUG_LOG("vmaDefragmentationBegin");
19200 
19201  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19202 
19203  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
19204 
19205 #if VMA_RECORDING_ENABLED
19206  if(allocator->GetRecorder() != VMA_NULL)
19207  {
19208  allocator->GetRecorder()->RecordDefragmentationBegin(
19209  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
19210  }
19211 #endif
19212 
19213  return res;
19214 }
19215 
19216 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
19217  VmaAllocator allocator,
19218  VmaDefragmentationContext context)
19219 {
19220  VMA_ASSERT(allocator);
19221 
19222  VMA_DEBUG_LOG("vmaDefragmentationEnd");
19223 
19224  if(context != VK_NULL_HANDLE)
19225  {
19226  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19227 
19228 #if VMA_RECORDING_ENABLED
19229  if(allocator->GetRecorder() != VMA_NULL)
19230  {
19231  allocator->GetRecorder()->RecordDefragmentationEnd(
19232  allocator->GetCurrentFrameIndex(), context);
19233  }
19234 #endif
19235 
19236  return allocator->DefragmentationEnd(context);
19237  }
19238  else
19239  {
19240  return VK_SUCCESS;
19241  }
19242 }
19243 
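/*
Usage sketch: CPU-side defragmentation with the begin/end pair, mirroring the
deprecated vmaDefragment() wrapper above. Illustrative; `allocs`/`allocCount`
are an assumed application-owned array of VmaAllocation.

\code
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
if(res == VK_NOT_READY)
    res = vmaDefragmentationEnd(allocator, defragCtx); // Finishes pending CPU-side moves.
\endcode
*/
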
19244 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
19245  VmaAllocator allocator,
19246  VmaDefragmentationContext context,
19247  VmaDefragmentationPassInfo* pInfo
19248  )
19249 {
19250  VMA_ASSERT(allocator);
19251  VMA_ASSERT(pInfo);
19252 
19253  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
19254 
19255  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19256 
19257  if(context == VK_NULL_HANDLE)
19258  {
19259  pInfo->moveCount = 0;
19260  return VK_SUCCESS;
19261  }
19262 
19263  return allocator->DefragmentationPassBegin(pInfo, context);
19264 }
19265 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
19266  VmaAllocator allocator,
19267  VmaDefragmentationContext context)
19268 {
19269  VMA_ASSERT(allocator);
19270 
19271  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
19272  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19273 
19274  if(context == VK_NULL_HANDLE)
19275  return VK_SUCCESS;
19276 
19277  return allocator->DefragmentationPassEnd(context);
19278 }
19279 
19280 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
19281  VmaAllocator allocator,
19282  VmaAllocation allocation,
19283  VkBuffer buffer)
19284 {
19285  VMA_ASSERT(allocator && allocation && buffer);
19286 
19287  VMA_DEBUG_LOG("vmaBindBufferMemory");
19288 
19289  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19290 
19291  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
19292 }
19293 
19294 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
19295  VmaAllocator allocator,
19296  VmaAllocation allocation,
19297  VkDeviceSize allocationLocalOffset,
19298  VkBuffer buffer,
19299  const void* pNext)
19300 {
19301  VMA_ASSERT(allocator && allocation && buffer);
19302 
19303  VMA_DEBUG_LOG("vmaBindBufferMemory2");
19304 
19305  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19306 
19307  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
19308 }
19309 
19310 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
19311  VmaAllocator allocator,
19312  VmaAllocation allocation,
19313  VkImage image)
19314 {
19315  VMA_ASSERT(allocator && allocation && image);
19316 
19317  VMA_DEBUG_LOG("vmaBindImageMemory");
19318 
19319  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19320 
19321  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
19322 }
19323 
19324 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
19325  VmaAllocator allocator,
19326  VmaAllocation allocation,
19327  VkDeviceSize allocationLocalOffset,
19328  VkImage image,
19329  const void* pNext)
19330 {
19331  VMA_ASSERT(allocator && allocation && image);
19332 
19333  VMA_DEBUG_LOG("vmaBindImageMemory2");
19334 
19335  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19336 
19337  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
19338 }
19339 
19340 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
19341  VmaAllocator allocator,
19342  const VkBufferCreateInfo* pBufferCreateInfo,
19343  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19344  VkBuffer* pBuffer,
19345  VmaAllocation* pAllocation,
19346  VmaAllocationInfo* pAllocationInfo)
19347 {
19348  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
19349 
19350  if(pBufferCreateInfo->size == 0)
19351  {
19352  return VK_ERROR_VALIDATION_FAILED_EXT;
19353  }
19354  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
19355  !allocator->m_UseKhrBufferDeviceAddress)
19356  {
19357  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
19358  return VK_ERROR_VALIDATION_FAILED_EXT;
19359  }
19360 
19361  VMA_DEBUG_LOG("vmaCreateBuffer");
19362 
19363  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19364 
19365  *pBuffer = VK_NULL_HANDLE;
19366  *pAllocation = VK_NULL_HANDLE;
19367 
19368  // 1. Create VkBuffer.
19369  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
19370  allocator->m_hDevice,
19371  pBufferCreateInfo,
19372  allocator->GetAllocationCallbacks(),
19373  pBuffer);
19374  if(res >= 0)
19375  {
19376  // 2. vkGetBufferMemoryRequirements.
19377  VkMemoryRequirements vkMemReq = {};
19378  bool requiresDedicatedAllocation = false;
19379  bool prefersDedicatedAllocation = false;
19380  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
19381  requiresDedicatedAllocation, prefersDedicatedAllocation);
19382 
19383  // 3. Allocate memory using allocator.
19384  res = allocator->AllocateMemory(
19385  vkMemReq,
19386  requiresDedicatedAllocation,
19387  prefersDedicatedAllocation,
19388  *pBuffer, // dedicatedBuffer
19389  pBufferCreateInfo->usage, // dedicatedBufferUsage
19390  VK_NULL_HANDLE, // dedicatedImage
19391  *pAllocationCreateInfo,
19392  VMA_SUBALLOCATION_TYPE_BUFFER,
19393  1, // allocationCount
19394  pAllocation);
19395 
19396 #if VMA_RECORDING_ENABLED
19397  if(allocator->GetRecorder() != VMA_NULL)
19398  {
19399  allocator->GetRecorder()->RecordCreateBuffer(
19400  allocator->GetCurrentFrameIndex(),
19401  *pBufferCreateInfo,
19402  *pAllocationCreateInfo,
19403  *pAllocation);
19404  }
19405 #endif
19406 
19407  if(res >= 0)
19408  {
19409  // 4. Bind buffer with memory.
19410  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19411  {
19412  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
19413  }
19414  if(res >= 0)
19415  {
19416  // All steps succeeded.
19417  #if VMA_STATS_STRING_ENABLED
19418  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19419  #endif
19420  if(pAllocationInfo != VMA_NULL)
19421  {
19422  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19423  }
19424 
19425  return VK_SUCCESS;
19426  }
19427  allocator->FreeMemory(
19428  1, // allocationCount
19429  pAllocation);
19430  *pAllocation = VK_NULL_HANDLE;
19431  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19432  *pBuffer = VK_NULL_HANDLE;
19433  return res;
19434  }
19435  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19436  *pBuffer = VK_NULL_HANDLE;
19437  return res;
19438  }
19439  return res;
19440 }
19441 
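/*
Usage sketch: the typical one-call path that creates a buffer, allocates and binds
its memory. Illustrative; size/usage values are arbitrary.

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
// ...
vmaDestroyBuffer(allocator, buf, alloc);
\endcode
*/
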
19442 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
19443  VmaAllocator allocator,
19444  VkBuffer buffer,
19445  VmaAllocation allocation)
19446 {
19447  VMA_ASSERT(allocator);
19448 
19449  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19450  {
19451  return;
19452  }
19453 
19454  VMA_DEBUG_LOG("vmaDestroyBuffer");
19455 
19456  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19457 
19458 #if VMA_RECORDING_ENABLED
19459  if(allocator->GetRecorder() != VMA_NULL)
19460  {
19461  allocator->GetRecorder()->RecordDestroyBuffer(
19462  allocator->GetCurrentFrameIndex(),
19463  allocation);
19464  }
19465 #endif
19466 
19467  if(buffer != VK_NULL_HANDLE)
19468  {
19469  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
19470  }
19471 
19472  if(allocation != VK_NULL_HANDLE)
19473  {
19474  allocator->FreeMemory(
19475  1, // allocationCount
19476  &allocation);
19477  }
19478 }
19479 
19480 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
19481  VmaAllocator allocator,
19482  const VkImageCreateInfo* pImageCreateInfo,
19483  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19484  VkImage* pImage,
19485  VmaAllocation* pAllocation,
19486  VmaAllocationInfo* pAllocationInfo)
19487 {
19488  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
19489 
19490  if(pImageCreateInfo->extent.width == 0 ||
19491  pImageCreateInfo->extent.height == 0 ||
19492  pImageCreateInfo->extent.depth == 0 ||
19493  pImageCreateInfo->mipLevels == 0 ||
19494  pImageCreateInfo->arrayLayers == 0)
19495  {
19496  return VK_ERROR_VALIDATION_FAILED_EXT;
19497  }
19498 
19499  VMA_DEBUG_LOG("vmaCreateImage");
19500 
19501  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19502 
19503  *pImage = VK_NULL_HANDLE;
19504  *pAllocation = VK_NULL_HANDLE;
19505 
19506  // 1. Create VkImage.
19507  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
19508  allocator->m_hDevice,
19509  pImageCreateInfo,
19510  allocator->GetAllocationCallbacks(),
19511  pImage);
19512  if(res >= 0)
19513  {
19514  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
19515  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
19516  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
19517 
19518  // 2. Allocate memory using allocator.
19519  VkMemoryRequirements vkMemReq = {};
19520  bool requiresDedicatedAllocation = false;
19521  bool prefersDedicatedAllocation = false;
19522  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
19523  requiresDedicatedAllocation, prefersDedicatedAllocation);
19524 
19525  res = allocator->AllocateMemory(
19526  vkMemReq,
19527  requiresDedicatedAllocation,
19528  prefersDedicatedAllocation,
19529  VK_NULL_HANDLE, // dedicatedBuffer
19530  UINT32_MAX, // dedicatedBufferUsage
19531  *pImage, // dedicatedImage
19532  *pAllocationCreateInfo,
19533  suballocType,
19534  1, // allocationCount
19535  pAllocation);
19536 
19537 #if VMA_RECORDING_ENABLED
19538  if(allocator->GetRecorder() != VMA_NULL)
19539  {
19540  allocator->GetRecorder()->RecordCreateImage(
19541  allocator->GetCurrentFrameIndex(),
19542  *pImageCreateInfo,
19543  *pAllocationCreateInfo,
19544  *pAllocation);
19545  }
19546 #endif
19547 
19548  if(res >= 0)
19549  {
19550  // 3. Bind image with memory.
19551  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19552  {
19553  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
19554  }
19555  if(res >= 0)
19556  {
19557  // All steps succeeded.
19558  #if VMA_STATS_STRING_ENABLED
19559  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
19560  #endif
19561  if(pAllocationInfo != VMA_NULL)
19562  {
19563  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19564  }
19565 
19566  return VK_SUCCESS;
19567  }
19568  allocator->FreeMemory(
19569  1, // allocationCount
19570  pAllocation);
19571  *pAllocation = VK_NULL_HANDLE;
19572  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19573  *pImage = VK_NULL_HANDLE;
19574  return res;
19575  }
19576  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19577  *pImage = VK_NULL_HANDLE;
19578  return res;
19579  }
19580  return res;
19581 }
19582 
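/*
Usage sketch: creating a 2D image together with its memory. Illustrative; format
and extent are arbitrary.

\code
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.extent.width = 1024;
imgCreateInfo.extent.height = 1024;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage img = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, NULL);
// ...
vmaDestroyImage(allocator, img, alloc);
\endcode
*/
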
19583 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
19584  VmaAllocator allocator,
19585  VkImage image,
19586  VmaAllocation allocation)
19587 {
19588  VMA_ASSERT(allocator);
19589 
19590  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19591  {
19592  return;
19593  }
19594 
19595  VMA_DEBUG_LOG("vmaDestroyImage");
19596 
19597  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19598 
19599 #if VMA_RECORDING_ENABLED
19600  if(allocator->GetRecorder() != VMA_NULL)
19601  {
19602  allocator->GetRecorder()->RecordDestroyImage(
19603  allocator->GetCurrentFrameIndex(),
19604  allocation);
19605  }
19606 #endif
19607 
19608  if(image != VK_NULL_HANDLE)
19609  {
19610  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
19611  }
19612  if(allocation != VK_NULL_HANDLE)
19613  {
19614  allocator->FreeMemory(
19615  1, // allocationCount
19616  &allocation);
19617  }
19618 }
19619 
19620 #endif // #ifdef VMA_IMPLEMENTATION
Definition: vk_mem_alloc.h:2879
uint32_t memoryTypeBits
Bitmask containing one bit set for every memory type acceptable for this allocation.
Definition: vk_mem_alloc.h:2905
VmaPool pool
Pool that this allocation should be created in.
Definition: vk_mem_alloc.h:2911
VkMemoryPropertyFlags preferredFlags
Flags that preferably should be set in a memory type chosen for an allocation.
Definition: vk_mem_alloc.h:2897
void * pUserData
Custom general-purpose pointer that will be stored in VmaAllocation, can be read as VmaAllocationInfo...
Definition: vk_mem_alloc.h:2918
VkMemoryPropertyFlags requiredFlags
Flags that must be set in a Memory Type chosen for an allocation.
Definition: vk_mem_alloc.h:2892
float priority
A floating-point value between 0 and 1, indicating the priority of the allocation relative to other m...
Definition: vk_mem_alloc.h:2925
VmaMemoryUsage usage
Intended usage of memory.
Definition: vk_mem_alloc.h:2887
VmaAllocationCreateFlags flags
Use VmaAllocationCreateFlagBits enum.
Definition: vk_mem_alloc.h:2881
Represents single memory allocation.
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
Definition: vk_mem_alloc.h:3246
VkDeviceSize offset
Offset in VkDeviceMemory object to the beginning of this allocation, in bytes. (deviceMemory,...
Definition: vk_mem_alloc.h:3270
void * pMappedData
Pointer to the beginning of this allocation as mapped data.
Definition: vk_mem_alloc.h:3290
uint32_t memoryType
Memory type index that this allocation was allocated from.
Definition: vk_mem_alloc.h:3251
VkDeviceSize size
Size of this allocation, in bytes.
Definition: vk_mem_alloc.h:3281
void * pUserData
Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vma...
Definition: vk_mem_alloc.h:3295
VkDeviceMemory deviceMemory
Handle to Vulkan memory object.
Definition: vk_mem_alloc.h:3260
Description of a Allocator to be created.
Definition: vk_mem_alloc.h:2413
VkPhysicalDevice physicalDevice
Vulkan physical device.
Definition: vk_mem_alloc.h:2418
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:2444
const VkDeviceSize * pHeapSizeLimit
Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out o...
Definition: vk_mem_alloc.h:2469
VmaAllocatorCreateFlags flags
Flags for created allocator. Use VmaAllocatorCreateFlagBits enum.
Definition: vk_mem_alloc.h:2415
const VmaVulkanFunctions * pVulkanFunctions
Pointers to Vulkan functions. Can be null.
Definition: vk_mem_alloc.h:2475
const VkAllocationCallbacks * pAllocationCallbacks
Custom CPU memory allocation callbacks. Optional.
Definition: vk_mem_alloc.h:2427
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2487
VkDeviceSize preferredLargeHeapBlockSize
Preferred size of a single VkDeviceMemory block to be allocated from large heaps > 1 GiB....
Definition: vk_mem_alloc.h:2424
const VmaRecordSettings * pRecordSettings
Parameters for recording of VMA calls. Can be null.
Definition: vk_mem_alloc.h:2482
VkDevice device
Vulkan device.
Definition: vk_mem_alloc.h:2421
uint32_t vulkanApiVersion
Optional. The highest version of Vulkan that the application is designed to use.
Definition: vk_mem_alloc.h:2496
const VmaDeviceMemoryCallbacks * pDeviceMemoryCallbacks
Informative callbacks for vkAllocateMemory, vkFreeMemory. Optional.
Definition: vk_mem_alloc.h:2430
Represents main object of this library initialized.
Information about existing VmaAllocator object.
Definition: vk_mem_alloc.h:2511
VkDevice device
Handle to Vulkan device object.
Definition: vk_mem_alloc.h:2526
VkInstance instance
Handle to Vulkan instance object.
Definition: vk_mem_alloc.h:2516
VkPhysicalDevice physicalDevice
Handle to Vulkan physical device object.
Definition: vk_mem_alloc.h:2521
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
Definition: vk_mem_alloc.h:2617
VkDeviceSize blockBytes
Sum size of all VkDeviceMemory blocks allocated from particular heap, in bytes.
Definition: vk_mem_alloc.h:2620
VkDeviceSize allocationBytes
Sum size of all allocations created in particular heap, in bytes.
Definition: vk_mem_alloc.h:2631
VkDeviceSize usage
Estimated current memory usage of the program, in bytes.
Definition: vk_mem_alloc.h:2641
VkDeviceSize budget
Estimated amount of memory available to the program, in bytes.
Definition: vk_mem_alloc.h:2652
Represents Opaque object that represents started defragmentation process.
Parameters for defragmentation.
Definition: vk_mem_alloc.h:3645
const VmaPool * pPools
Either null or pointer to array of pools to be defragmented.
Definition: vk_mem_alloc.h:3685
uint32_t allocationCount
Number of allocations in pAllocations array.
Definition: vk_mem_alloc.h:3651
uint32_t maxGpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on GPU side,...
Definition: vk_mem_alloc.h:3705
VkDeviceSize maxGpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3700
VmaDefragmentationFlags flags
Reserved for future use. Should be 0.
Definition: vk_mem_alloc.h:3648
VkBool32 * pAllocationsChanged
Optional, output. Pointer to array that will be filled with information whether the allocation at cer...
Definition: vk_mem_alloc.h:3666
uint32_t poolCount
Numer of pools in pPools array.
Definition: vk_mem_alloc.h:3669
VkCommandBuffer commandBuffer
Optional. Command buffer where GPU copy commands will be posted.
Definition: vk_mem_alloc.h:3714
uint32_t maxCpuAllocationsToMove
Maximum number of allocations that can be moved to a different place using transfers on CPU side,...
Definition: vk_mem_alloc.h:3695
const VmaAllocation * pAllocations
Pointer to array of allocations that can be defragmented.
Definition: vk_mem_alloc.h:3660
VkDeviceSize maxCpuBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places using ...
Definition: vk_mem_alloc.h:3690
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
Definition: vk_mem_alloc.h:3736
uint32_t maxAllocationsToMove
Maximum number of allocations that can be moved to different place.
Definition: vk_mem_alloc.h:3746
VkDeviceSize maxBytesToMove
Maximum total numbers of bytes that can be copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3741
Parameters for incremental defragmentation steps.
Definition: vk_mem_alloc.h:3727
uint32_t moveCount
Definition: vk_mem_alloc.h:3728
VmaDefragmentationPassMoveInfo * pMoves
Definition: vk_mem_alloc.h:3729
Definition: vk_mem_alloc.h:3717
VkDeviceMemory memory
Definition: vk_mem_alloc.h:3719
VkDeviceSize offset
Definition: vk_mem_alloc.h:3720
VmaAllocation allocation
Definition: vk_mem_alloc.h:3718
Statistics returned by function vmaDefragment().
Definition: vk_mem_alloc.h:3750
uint32_t deviceMemoryBlocksFreed
Number of empty VkDeviceMemory objects that have been released to the system.
Definition: vk_mem_alloc.h:3758
VkDeviceSize bytesMoved
Total number of bytes that have been copied while moving allocations to different places.
Definition: vk_mem_alloc.h:3752
VkDeviceSize bytesFreed
Total number of bytes that have been released to the system by freeing empty VkDeviceMemory objects.
Definition: vk_mem_alloc.h:3754
uint32_t allocationsMoved
Number of allocations that have been moved to different places.
Definition: vk_mem_alloc.h:3756
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
Definition: vk_mem_alloc.h:2222
void * pUserData
Optional, can be null.
Definition: vk_mem_alloc.h:2228
PFN_vmaAllocateDeviceMemoryFunction pfnAllocate
Optional, can be null.
Definition: vk_mem_alloc.h:2224
PFN_vmaFreeDeviceMemoryFunction pfnFree
Optional, can be null.
Definition: vk_mem_alloc.h:2226
Describes parameter of created VmaPool.
Definition: vk_mem_alloc.h:3047
float priority
A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relat...
Definition: vk_mem_alloc.h:3095
uint32_t memoryTypeIndex
Vulkan memory type index to allocate this pool from.
Definition: vk_mem_alloc.h:3050
VmaPoolCreateFlags flags
Use combination of VmaPoolCreateFlagBits.
Definition: vk_mem_alloc.h:3053
uint32_t frameInUseCount
Maximum number of additional frames that are in use at the same time as current frame.
Definition: vk_mem_alloc.h:3089
VkDeviceSize blockSize
Size of a single VkDeviceMemory block to be allocated as part of this pool, in bytes....
Definition: vk_mem_alloc.h:3062
size_t minBlockCount
Minimum number of blocks to be always allocated in this pool, even if they stay empty.
Definition: vk_mem_alloc.h:3067
VkDeviceSize minAllocationAlignment
Additional minimum alignment to be used for all allocations created from this pool....
Definition: vk_mem_alloc.h:3102
size_t maxBlockCount
Maximum number of blocks that can be allocated in this pool. Optional.
Definition: vk_mem_alloc.h:3075
void * pMemoryAllocateNext
Additional pNext chain to be attached to VkMemoryAllocateInfo used for every allocation made by this ...
Definition: vk_mem_alloc.h:3112
Represents custom memory pool.
Describes parameter of existing VmaPool.
Definition: vk_mem_alloc.h:3117
VkDeviceSize size
Total amount of VkDeviceMemory allocated from Vulkan for this pool, in bytes.
Definition: vk_mem_alloc.h:3120
size_t blockCount
Number of VkDeviceMemory blocks allocated for this pool.
Definition: vk_mem_alloc.h:3139
VkDeviceSize unusedRangeSizeMax
Size of the largest continuous free memory region available for new allocation.
Definition: vk_mem_alloc.h:3136
size_t allocationCount
Number of VmaAllocation objects created from this pool that were not destroyed or lost.
Definition: vk_mem_alloc.h:3126
VkDeviceSize unusedSize
Total number of bytes in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:3123
size_t unusedRangeCount
Number of continuous memory ranges in the pool not used by any VmaAllocation.
Definition: vk_mem_alloc.h:3129
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
Definition: vk_mem_alloc.h:2398
const char * pFilePath
Path to the file that should be written by the recording.
Definition: vk_mem_alloc.h:2408
VmaRecordFlags flags
Flags for recording. Use VmaRecordFlagBits enum.
Definition: vk_mem_alloc.h:2400
Calculated statistics of memory usage in entire allocator.
Definition: vk_mem_alloc.h:2578
VkDeviceSize allocationSizeAvg
Definition: vk_mem_alloc.h:2589
VkDeviceSize allocationSizeMax
Definition: vk_mem_alloc.h:2589
VkDeviceSize unusedBytes
Total number of bytes occupied by unused ranges.
Definition: vk_mem_alloc.h:2588
VkDeviceSize unusedRangeSizeAvg
Definition: vk_mem_alloc.h:2590
uint32_t allocationCount
Number of VmaAllocation allocation objects allocated.
Definition: vk_mem_alloc.h:2582
VkDeviceSize unusedRangeSizeMax
Definition: vk_mem_alloc.h:2590
VkDeviceSize usedBytes
Total number of bytes occupied by all allocations.
Definition: vk_mem_alloc.h:2586
uint32_t blockCount
Number of VkDeviceMemory Vulkan memory blocks allocated.
Definition: vk_mem_alloc.h:2580
VkDeviceSize allocationSizeMin
Definition: vk_mem_alloc.h:2589
uint32_t unusedRangeCount
Number of free ranges of memory between allocations.
Definition: vk_mem_alloc.h:2584
VkDeviceSize unusedRangeSizeMin
Definition: vk_mem_alloc.h:2590
General statistics from current state of Allocator.
Definition: vk_mem_alloc.h:2595
VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS]
Definition: vk_mem_alloc.h:2597
VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES]
Definition: vk_mem_alloc.h:2596
VmaStatInfo total
Definition: vk_mem_alloc.h:2598
Pointers to some Vulkan functions - a subset used by the library.
Definition: vk_mem_alloc.h:2352
PFN_vkBindImageMemory vkBindImageMemory
Definition: vk_mem_alloc.h:2362
PFN_vkCreateImage vkCreateImage
Definition: vk_mem_alloc.h:2367
PFN_vkAllocateMemory vkAllocateMemory
Definition: vk_mem_alloc.h:2355
PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges
Definition: vk_mem_alloc.h:2359
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements
Definition: vk_mem_alloc.h:2364
PFN_vkFreeMemory vkFreeMemory
Definition: vk_mem_alloc.h:2356
PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements
Definition: vk_mem_alloc.h:2363
PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges
Definition: vk_mem_alloc.h:2360
PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties
Definition: vk_mem_alloc.h:2354
PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties
Definition: vk_mem_alloc.h:2353
PFN_vkDestroyBuffer vkDestroyBuffer
Definition: vk_mem_alloc.h:2366
PFN_vkDestroyImage vkDestroyImage
Definition: vk_mem_alloc.h:2368
PFN_vkBindBufferMemory vkBindBufferMemory
Definition: vk_mem_alloc.h:2361
PFN_vkMapMemory vkMapMemory
Definition: vk_mem_alloc.h:2357
PFN_vkUnmapMemory vkUnmapMemory
Definition: vk_mem_alloc.h:2358
PFN_vkCmdCopyBuffer vkCmdCopyBuffer
Definition: vk_mem_alloc.h:2369
PFN_vkCreateBuffer vkCreateBuffer
Definition: vk_mem_alloc.h:2365
VkResult vmaCreateImage(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkImage *pImage, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaCreateBuffer().
VkResult vmaFindMemoryTypeIndexForImageInfo(VmaAllocator allocator, const VkImageCreateInfo *pImageCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
void vmaDestroyBuffer(VmaAllocator allocator, VkBuffer buffer, VmaAllocation allocation)
Destroys Vulkan buffer and frees allocated memory.
VkResult vmaAllocateMemoryForImage(VmaAllocator allocator, VkImage image, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
Function similar to vmaAllocateMemoryForBuffer().
struct VmaPoolCreateInfo VmaPoolCreateInfo
Describes parameter of created VmaPool.
void(VKAPI_PTR * PFN_vmaFreeDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called before vkFreeMemory.
Definition: vk_mem_alloc.h:2208
struct VmaRecordSettings VmaRecordSettings
Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSetting...
struct VmaAllocatorInfo VmaAllocatorInfo
Information about existing VmaAllocator object.
VkResult vmaEndDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context)
struct VmaAllocationInfo VmaAllocationInfo
Parameters of VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
#define VMA_RECORDING_ENABLED
Definition: vk_mem_alloc.h:2029
VkResult vmaCreateAllocator(const VmaAllocatorCreateInfo *pCreateInfo, VmaAllocator *pAllocator)
Creates Allocator object.
struct VmaStats VmaStats
General statistics from current state of Allocator.
VkFlags VmaPoolCreateFlags
Definition: vk_mem_alloc.h:3043
struct VmaDefragmentationInfo VmaDefragmentationInfo
Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
VkResult vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Flushes memory of given allocation.
void vmaFreeStatsString(VmaAllocator allocator, char *pStatsString)
void vmaCalculateStats(VmaAllocator allocator, VmaStats *pStats)
Retrieves statistics from current state of the Allocator.
VkResult vmaDefragmentationBegin(VmaAllocator allocator, const VmaDefragmentationInfo2 *pInfo, VmaDefragmentationStats *pStats, VmaDefragmentationContext *pContext)
Begins defragmentation process.
struct VmaAllocationCreateInfo VmaAllocationCreateInfo
VkResult vmaBindImageMemory(VmaAllocator allocator, VmaAllocation allocation, VkImage image)
Binds image to allocation.
VkBool32 vmaTouchAllocation(VmaAllocator allocator, VmaAllocation allocation)
Returns VK_TRUE if allocation is not lost and atomically marks it as used in current frame.
struct VmaPoolStats VmaPoolStats
Describes parameter of existing VmaPool.
VkResult vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
Checks magic number in margins around all allocations in given memory types (in both default and cust...
VmaRecordFlagBits
Flags to be used in VmaRecordSettings::flags.
Definition: vk_mem_alloc.h:2384
@ VMA_RECORD_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2392
@ VMA_RECORD_FLUSH_AFTER_CALL_BIT
Enables flush after recording every function call.
Definition: vk_mem_alloc.h:2390
VmaAllocatorCreateFlagBits
Flags for created VmaAllocator.
Definition: vk_mem_alloc.h:2232
@ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
Definition: vk_mem_alloc.h:2307
@ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
Allocator and all objects created from it will not be synchronized internally, so you must guarantee ...
Definition: vk_mem_alloc.h:2237
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT
Definition: vk_mem_alloc.h:2289
@ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
Definition: vk_mem_alloc.h:2325
@ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT
Definition: vk_mem_alloc.h:2277
@ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT
Enables usage of VK_KHR_dedicated_allocation extension.
Definition: vk_mem_alloc.h:2262
@ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2344
@ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
Definition: vk_mem_alloc.h:2342
VkFlags VmaAllocationCreateFlags
Definition: vk_mem_alloc.h:2876
void vmaDestroyPool(VmaAllocator allocator, VmaPool pool)
Destroys VmaPool object and frees Vulkan device memory.
VkResult vmaCreatePool(VmaAllocator allocator, const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool)
Allocates Vulkan device memory and creates VmaPool object.
void vmaFreeMemory(VmaAllocator allocator, const VmaAllocation allocation)
Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(),...
VmaDefragmentationFlagBits
Flags to be used in vmaDefragmentationBegin(). None at the moment. Reserved for future use.
Definition: vk_mem_alloc.h:3635
@ VMA_DEFRAGMENTATION_FLAG_INCREMENTAL
Definition: vk_mem_alloc.h:3636
@ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3637
VkResult vmaBindBufferMemory(VmaAllocator allocator, VmaAllocation allocation, VkBuffer buffer)
Binds buffer to allocation.
struct VmaDefragmentationPassInfo VmaDefragmentationPassInfo
Parameters for incremental defragmentation steps.
void vmaMakePoolAllocationsLost(VmaAllocator allocator, VmaPool pool, size_t *pLostAllocationCount)
Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInf...
struct VmaDeviceMemoryCallbacks VmaDeviceMemoryCallbacks
Set of callbacks that the library will call for vkAllocateMemory and vkFreeMemory.
void(VKAPI_PTR * PFN_vmaAllocateDeviceMemoryFunction)(VmaAllocator allocator, uint32_t memoryType, VkDeviceMemory memory, VkDeviceSize size, void *pUserData)
Callback function called after successful vkAllocateMemory.
Definition: vk_mem_alloc.h:2201
VkResult vmaAllocateMemoryForBuffer(VmaAllocator allocator, VkBuffer buffer, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
void vmaFreeMemoryPages(VmaAllocator allocator, size_t allocationCount, const VmaAllocation *pAllocations)
Frees memory and destroys multiple allocations.
void vmaGetAllocationInfo(VmaAllocator allocator, VmaAllocation allocation, VmaAllocationInfo *pAllocationInfo)
Returns current information about specified allocation and atomically marks it as used in current fra...
void vmaGetMemoryTypeProperties(VmaAllocator allocator, uint32_t memoryTypeIndex, VkMemoryPropertyFlags *pFlags)
Given Memory Type Index, returns Property Flags of this memory type.
VkResult vmaDefragmentationEnd(VmaAllocator allocator, VmaDefragmentationContext context)
Ends defragmentation process.
VkFlags VmaDefragmentationFlags
Definition: vk_mem_alloc.h:3639
VkResult vmaBindBufferMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, const void *pNext)
Binds buffer to allocation with additional parameters.
VmaPoolCreateFlagBits
Flags to be passed as VmaPoolCreateInfo::flags.
Definition: vk_mem_alloc.h:2987
@ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
Enables alternative, linear allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3022
@ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:3041
@ VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT
Enables alternative, buddy allocation algorithm in this pool.
Definition: vk_mem_alloc.h:3033
@ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT
Use this flag if you always allocate only buffers and linear images or only optimal images out of thi...
Definition: vk_mem_alloc.h:3005
@ VMA_POOL_CREATE_ALGORITHM_MASK
Definition: vk_mem_alloc.h:3037
void vmaUnmapMemory(VmaAllocator allocator, VmaAllocation allocation)
Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
VkResult vmaDefragment(VmaAllocator allocator, const VmaAllocation *pAllocations, size_t allocationCount, VkBool32 *pAllocationsChanged, const VmaDefragmentationInfo *pDefragmentationInfo, VmaDefragmentationStats *pDefragmentationStats)
Deprecated. Compacts memory by moving allocations.
struct VmaBudget VmaBudget
Statistics of current memory usage and available budget, in bytes, for specific memory heap.
void vmaBuildStatsString(VmaAllocator allocator, char **ppStatsString, VkBool32 detailedMap)
Builds and returns statistics as string in JSON format.
VmaMemoryUsage
Definition: vk_mem_alloc.h:2700
@ VMA_MEMORY_USAGE_MAX_ENUM
Definition: vk_mem_alloc.h:2763
@ VMA_MEMORY_USAGE_CPU_ONLY
Definition: vk_mem_alloc.h:2731
@ VMA_MEMORY_USAGE_CPU_COPY
Definition: vk_mem_alloc.h:2753
@ VMA_MEMORY_USAGE_GPU_TO_CPU
Definition: vk_mem_alloc.h:2747
@ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED
Definition: vk_mem_alloc.h:2761
@ VMA_MEMORY_USAGE_CPU_TO_GPU
Definition: vk_mem_alloc.h:2738
@ VMA_MEMORY_USAGE_GPU_ONLY
Definition: vk_mem_alloc.h:2721
@ VMA_MEMORY_USAGE_UNKNOWN
Definition: vk_mem_alloc.h:2704
VkResult vmaBindImageMemory2(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, const void *pNext)
Binds image to allocation with additional parameters.
void vmaDestroyAllocator(VmaAllocator allocator)
Destroys allocator object.
VkResult vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
Invalidates memory of given allocation.
VkResult vmaInvalidateAllocations(VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation *allocations, const VkDeviceSize *offsets, const VkDeviceSize *sizes)
Invalidates memory of given set of allocations.
void vmaGetMemoryProperties(VmaAllocator allocator, const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties)
struct VmaVulkanFunctions VmaVulkanFunctions
Pointers to some Vulkan functions - a subset used by the library.
VkResult vmaAllocateMemory(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation.
VkResult vmaBeginDefragmentationPass(VmaAllocator allocator, VmaDefragmentationContext context, VmaDefragmentationPassInfo *pInfo)
VkResult vmaFlushAllocations(VmaAllocator allocator, uint32_t allocationCount, const VmaAllocation *allocations, const VkDeviceSize *offsets, const VkDeviceSize *sizes)
Flushes memory of given set of allocations.
VkResult vmaCreateBuffer(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, VkBuffer *pBuffer, VmaAllocation *pAllocation, VmaAllocationInfo *pAllocationInfo)
VkFlags VmaAllocatorCreateFlags
Definition: vk_mem_alloc.h:2346
VkResult vmaAllocateMemoryPages(VmaAllocator allocator, const VkMemoryRequirements *pVkMemoryRequirements, const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, VmaAllocation *pAllocations, VmaAllocationInfo *pAllocationInfo)
General purpose memory allocation for multiple allocation objects at once.
VkResult vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
Checks magic number in margins around all allocations in given memory pool in search for corruptions.
VkResult vmaMapMemory(VmaAllocator allocator, VmaAllocation allocation, void **ppData)
Maps memory represented by the given allocation and returns a pointer to it.
struct VmaDefragmentationPassMoveInfo VmaDefragmentationPassMoveInfo
struct VmaDefragmentationInfo2 VmaDefragmentationInfo2
Parameters for defragmentation.
struct VmaDefragmentationStats VmaDefragmentationStats
Statistics returned by function vmaDefragment().
VmaAllocationCreateFlagBits
Flags to be passed as VmaAllocationCreateInfo::flags.
Definition: vk_mem_alloc.h:2767
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
Definition: vk_mem_alloc.h:2862
@ VMA_ALLOCATION_CREATE_MAPPED_BIT
Set this flag to use memory that will be persistently mapped, and retrieve a pointer to it (see the sketch after this list).
Definition: vk_mem_alloc.h:2798
@ VMA_ALLOCATION_CREATE_DONT_BIND_BIT
Definition: vk_mem_alloc.h:2835
@ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
Definition: vk_mem_alloc.h:2855
@ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
Set this flag if the allocation should have its own memory block.
Definition: vk_mem_alloc.h:2774
@ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
Definition: vk_mem_alloc.h:2829
@ VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
Definition: vk_mem_alloc.h:2811
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT
Definition: vk_mem_alloc.h:2865
@ VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
Definition: vk_mem_alloc.h:2818
@ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT
Definition: vk_mem_alloc.h:2844
@ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT
Set this flag to only try to allocate from existing VkDeviceMemory blocks and never create new such blocks.
Definition: vk_mem_alloc.h:2785
@ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
Definition: vk_mem_alloc.h:2859
@ VMA_ALLOCATION_CREATE_STRATEGY_MASK
Definition: vk_mem_alloc.h:2869
@ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT
Definition: vk_mem_alloc.h:2824
@ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT
Definition: vk_mem_alloc.h:2839
@ VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT
Definition: vk_mem_alloc.h:2848
@ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM
Definition: vk_mem_alloc.h:2874
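A sketch combining the flags above: a persistently mapped, dedicated allocation, with `bufCreateInfo` as in the vmaCreateBuffer() example and `myData`/`myDataSize` as placeholders:

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT |
                            VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo allocInfo;
    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);

    // With MAPPED_BIT the pointer stays valid for the allocation's lifetime,
    // so no vmaMapMemory()/vmaUnmapMemory() pair is needed:
    memcpy(allocInfo.pMappedData, myData, myDataSize);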
void vmaSetPoolName(VmaAllocator allocator, VmaPool pool, const char *pName)
Sets the name of a custom pool.
void vmaSetCurrentFrameIndex(VmaAllocator allocator, uint32_t frameIndex)
Sets the index of the current frame.
void vmaDestroyImage(VmaAllocator allocator, VkImage image, VmaAllocation allocation)
Destroys Vulkan image and frees allocated memory.
void vmaCreateLostAllocation(VmaAllocator allocator, VmaAllocation *pAllocation)
Creates a new allocation that is in lost state from the beginning.
VkResult vmaFindMemoryTypeIndexForBufferInfo(VmaAllocator allocator, const VkBufferCreateInfo *pBufferCreateInfo, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
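A sketch of the typical use: picking the memoryTypeIndex for a custom pool from a representative buffer description, then passing it to vmaCreatePool(); all names are local placeholders:

    VkBufferCreateInfo exampleBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    exampleBufInfo.size = 1024; // the size hardly matters here, only the usage flags do
    exampleBufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufInfo, &allocCreateInfo, &memTypeIndex);

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;

    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);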
void vmaGetPoolStats(VmaAllocator allocator, VmaPool pool, VmaPoolStats *pPoolStats)
Retrieves statistics of an existing VmaPool object.
void vmaGetBudget(VmaAllocator allocator, VmaBudget *pBudget)
Retrieves information about the current memory budget for all memory heaps.
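A sketch of reading the per-heap budgets; the array passed to vmaGetBudget() needs one element per memory heap of the physical device:

    const VkPhysicalDeviceMemoryProperties* memProps;
    vmaGetMemoryProperties(allocator, &memProps);

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);

    for(uint32_t i = 0; i < memProps->memoryHeapCount; ++i)
    {
        printf("Heap %u: usage %llu / budget %llu bytes\n", i, // requires <stdio.h>
            (unsigned long long)budgets[i].usage,
            (unsigned long long)budgets[i].budget);
    }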
struct VmaStatInfo VmaStatInfo
Calculated statistics of memory usage in the entire allocator.
void vmaGetPhysicalDeviceProperties(VmaAllocator allocator, const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
PhysicalDeviceProperties are fetched from physicalDevice by the allocator. You can access it here, without fetching it again on your own.
VkResult vmaFindMemoryTypeIndex(VmaAllocator allocator, uint32_t memoryTypeBits, const VmaAllocationCreateInfo *pAllocationCreateInfo, uint32_t *pMemoryTypeIndex)
Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
void vmaGetPoolName(VmaAllocator allocator, VmaPool pool, const char **ppName)
Retrieves the name of a custom pool.
VkFlags VmaRecordFlags
Definition: vk_mem_alloc.h:2394
void vmaSetAllocationUserData(VmaAllocator allocator, VmaAllocation allocation, void *pUserData)
Sets pUserData in a given allocation to a new value.
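A sketch of tagging an allocation with a name, assuming `alloc` was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT so the string is copied; without that flag, pUserData is stored only as an opaque pointer:

    vmaSetAllocationUserData(allocator, alloc, (void*)"Vertex buffer: terrain");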
void vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo)
Returns information about an existing VmaAllocator object - handle to the Vulkan device, etc.