Vulkan Memory Allocator
vk_mem_alloc.h
1 //
2 // Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
3 //
4 // Permission is hereby granted, free of charge, to any person obtaining a copy
5 // of this software and associated documentation files (the "Software"), to deal
6 // in the Software without restriction, including without limitation the rights
7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 // copies of the Software, and to permit persons to whom the Software is
9 // furnished to do so, subject to the following conditions:
10 //
11 // The above copyright notice and this permission notice shall be included in
12 // all copies or substantial portions of the Software.
13 //
14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 // THE SOFTWARE.
21 //
22 
23 #ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
24 #define AMD_VULKAN_MEMORY_ALLOCATOR_H
25 
2022 #ifdef __cplusplus
2023 extern "C" {
2024 #endif
2025 
2026 /*
2027 Define this macro to 0/1 to disable/enable support for recording functionality,
2028 available through VmaAllocatorCreateInfo::pRecordSettings.
2029 */
2030 #ifndef VMA_RECORDING_ENABLED
2031  #define VMA_RECORDING_ENABLED 0
2032 #endif
2033 
2034 #if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
2035  #define NOMINMAX // For windows.h
2036 #endif
2037 
2038 #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
2039  extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
2040  extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
2041  extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
2042  extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
2043  extern PFN_vkAllocateMemory vkAllocateMemory;
2044  extern PFN_vkFreeMemory vkFreeMemory;
2045  extern PFN_vkMapMemory vkMapMemory;
2046  extern PFN_vkUnmapMemory vkUnmapMemory;
2047  extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
2048  extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
2049  extern PFN_vkBindBufferMemory vkBindBufferMemory;
2050  extern PFN_vkBindImageMemory vkBindImageMemory;
2051  extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
2052  extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
2053  extern PFN_vkCreateBuffer vkCreateBuffer;
2054  extern PFN_vkDestroyBuffer vkDestroyBuffer;
2055  extern PFN_vkCreateImage vkCreateImage;
2056  extern PFN_vkDestroyImage vkDestroyImage;
2057  extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
2058  #if VMA_VULKAN_VERSION >= 1001000
2059  extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
2060  extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
2061  extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
2062  extern PFN_vkBindImageMemory2 vkBindImageMemory2;
2063  extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
2064  #endif // #if VMA_VULKAN_VERSION >= 1001000
2065 #endif // #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
2066 
2067 #ifndef VULKAN_H_
2068  #include <vulkan/vulkan.h>
2069 #endif
2070 
2071 // Define this macro to declare the maximum supported Vulkan version in the format AAABBBCCC,
2072 // where AAA = major, BBB = minor, CCC = patch.
2073 // If you want to use a version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
2074 #if !defined(VMA_VULKAN_VERSION)
2075  #if defined(VK_VERSION_1_2)
2076  #define VMA_VULKAN_VERSION 1002000
2077  #elif defined(VK_VERSION_1_1)
2078  #define VMA_VULKAN_VERSION 1001000
2079  #else
2080  #define VMA_VULKAN_VERSION 1000000
2081  #endif
2082 #endif
2083 
2084 #if !defined(VMA_DEDICATED_ALLOCATION)
2085  #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
2086  #define VMA_DEDICATED_ALLOCATION 1
2087  #else
2088  #define VMA_DEDICATED_ALLOCATION 0
2089  #endif
2090 #endif
2091 
2092 #if !defined(VMA_BIND_MEMORY2)
2093  #if VK_KHR_bind_memory2
2094  #define VMA_BIND_MEMORY2 1
2095  #else
2096  #define VMA_BIND_MEMORY2 0
2097  #endif
2098 #endif
2099 
2100 #if !defined(VMA_MEMORY_BUDGET)
2101  #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
2102  #define VMA_MEMORY_BUDGET 1
2103  #else
2104  #define VMA_MEMORY_BUDGET 0
2105  #endif
2106 #endif
2107 
2108 // Defined to 1 when the VK_KHR_buffer_device_address device extension or the equivalent core Vulkan 1.2 feature is defined in the included Vulkan headers.
2109 #if !defined(VMA_BUFFER_DEVICE_ADDRESS)
2110  #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
2111  #define VMA_BUFFER_DEVICE_ADDRESS 1
2112  #else
2113  #define VMA_BUFFER_DEVICE_ADDRESS 0
2114  #endif
2115 #endif
2116 
2117 // Define these macros to decorate all public functions with additional code,
2118 // before and after the return type, respectively. This may be useful for
2119 // exporting the functions when compiling VMA as a separate library. Example:
2120 // #define VMA_CALL_PRE __declspec(dllexport)
2121 // #define VMA_CALL_POST __cdecl
2122 #ifndef VMA_CALL_PRE
2123  #define VMA_CALL_PRE
2124 #endif
2125 #ifndef VMA_CALL_POST
2126  #define VMA_CALL_POST
2127 #endif
2128 
2129 // Define this macro to decorate pointers with an attribute specifying the
2130 // length of the array they point to if they are not null.
2131 //
2132 // The length may be one of:
2133 // - The name of another parameter in the argument list where the pointer is declared
2134 // - The name of another member in the struct where the pointer is declared
2135 // - The name of a member of a struct type, meaning the value of that member in
2136 //   the context of the call. For example,
2137 //   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
2138 //   means the number of memory heaps available in the device associated
2139 //   with the VmaAllocator in use.
2140 #ifndef VMA_LEN_IF_NOT_NULL
2141  #define VMA_LEN_IF_NOT_NULL(len)
2142 #endif
2143 
2144 // The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
2145 // see: https://clang.llvm.org/docs/AttributeReference.html#nullable
2146 #ifndef VMA_NULLABLE
2147  #ifdef __clang__
2148  #define VMA_NULLABLE _Nullable
2149  #else
2150  #define VMA_NULLABLE
2151  #endif
2152 #endif
2153 
2154 // The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
2155 // see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
2156 #ifndef VMA_NOT_NULL
2157  #ifdef __clang__
2158  #define VMA_NOT_NULL _Nonnull
2159  #else
2160  #define VMA_NOT_NULL
2161  #endif
2162 #endif
2163 
2164 // If non-dispatchable handles are represented as pointers then we can give
2165 // them nullability annotations.
2166 #ifndef VMA_NOT_NULL_NON_DISPATCHABLE
2167  #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
2168  #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
2169  #else
2170  #define VMA_NOT_NULL_NON_DISPATCHABLE
2171  #endif
2172 #endif
2173 
2174 #ifndef VMA_NULLABLE_NON_DISPATCHABLE
2175  #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
2176  #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
2177  #else
2178  #define VMA_NULLABLE_NON_DISPATCHABLE
2179  #endif
2180 #endif
2181 
2191 VK_DEFINE_HANDLE(VmaAllocator)
2192 
2193 typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
2195  VmaAllocator VMA_NOT_NULL allocator,
2196  uint32_t memoryType,
2197  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
2198  VkDeviceSize size,
2199  void* VMA_NULLABLE pUserData);
2201 typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
2202  VmaAllocator VMA_NOT_NULL allocator,
2203  uint32_t memoryType,
2204  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
2205  VkDeviceSize size,
2206  void* VMA_NULLABLE pUserData);
2207 
2216 typedef struct VmaDeviceMemoryCallbacks {
2218  PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
2220  PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
2221  void* VMA_NULLABLE pUserData;
2222 } VmaDeviceMemoryCallbacks;
2223 
2319 
2322 typedef VkFlags VmaAllocatorCreateFlags;
2323 
2328 typedef struct VmaVulkanFunctions {
2329  PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
2330  PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
2331  PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
2332  PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
2333  PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
2334  PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
2335  PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
2336  PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
2337  PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
2338  PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
2339  PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
2340  PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
2341  PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
2342  PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
2343  PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
2344  PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
2345  PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
2346 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
2347  PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
2348  PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
2349 #endif
2350 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
2351  PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
2352  PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
2353 #endif
2354 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
2355  PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
2356 #endif
2357 } VmaVulkanFunctions;
2358 
2360 typedef enum VmaRecordFlagBits {
2366  VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
2367 
2368  VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
2369 } VmaRecordFlagBits;
2370 typedef VkFlags VmaRecordFlags;
2371 
2373 typedef struct VmaRecordSettings
2374 {
2376  VmaRecordFlags flags;
2384  const char* VMA_NOT_NULL pFilePath;
2385 } VmaRecordSettings;
2386 
2388 typedef struct VmaAllocatorCreateInfo
2389 {
2391  VmaAllocatorCreateFlags flags;
2393 
2394  VkPhysicalDevice VMA_NOT_NULL physicalDevice;
2396 
2397  VkDevice VMA_NOT_NULL device;
2399 
2401  VkDeviceSize preferredLargeHeapBlockSize;
2402 
2403  const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
2405 
2407  const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
2421  uint32_t frameInUseCount;
2445  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
2446 
2452  const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
2458  const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
2463  VkInstance VMA_NOT_NULL instance;
2472  uint32_t vulkanApiVersion;
2473 } VmaAllocatorCreateInfo;
2474 
2476 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
2477  const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
2478  VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);
2479 
2481 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
2482  VmaAllocator VMA_NULLABLE allocator);
2483 
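/*
Example usage (a minimal sketch; assumes `instance`, `physicalDevice` and `device`
were created by the application beforehand):

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    allocatorInfo.instance = instance;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/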
2486 typedef struct VmaAllocatorInfo
2487 {
2492  VkInstance VMA_NOT_NULL instance;
2497  VkPhysicalDevice VMA_NOT_NULL physicalDevice;
2502  VkDevice VMA_NOT_NULL device;
2503 } VmaAllocatorInfo;
2504 
2510 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
2511 
2516 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
2517  VmaAllocator VMA_NOT_NULL allocator,
2518  const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);
2519 
2524 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
2525  VmaAllocator VMA_NOT_NULL allocator,
2526  const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
2527 
2534 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
2535  VmaAllocator VMA_NOT_NULL allocator,
2536  uint32_t memoryTypeIndex,
2537  VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
2538 
2547 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
2548  VmaAllocator VMA_NOT_NULL allocator,
2549  uint32_t frameIndex);
2550 
2553 typedef struct VmaStatInfo
2554 {
2556  uint32_t blockCount;
2558  uint32_t allocationCount;
2560  uint32_t unusedRangeCount;
2562  VkDeviceSize usedBytes;
2564  VkDeviceSize unusedBytes;
2565  VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
2566  VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
2567 } VmaStatInfo;
2568 
2570 typedef struct VmaStats
2571 {
2572  VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
2573  VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
2574  VmaStatInfo total;
2575 } VmaStats;
2576 
2586 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
2587  VmaAllocator VMA_NOT_NULL allocator,
2588  VmaStats* VMA_NOT_NULL pStats);
2589 
2592 typedef struct VmaBudget
2593 {
2596  VkDeviceSize blockBytes;
2597 
2607  VkDeviceSize allocationBytes;
2608 
2617  VkDeviceSize usage;
2618 
2628  VkDeviceSize budget;
2629 } VmaBudget;
2630 
2641 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
2642  VmaAllocator VMA_NOT_NULL allocator,
2643  VmaBudget* VMA_NOT_NULL pBudget);
2644 
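/*
Example usage (a sketch): vmaGetBudget fills one VmaBudget entry per memory heap,
so the array must have at least memoryHeapCount elements.

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets);
    // Compare budgets[heapIndex].usage against budgets[heapIndex].budget
    // to decide whether new allocations would exceed the budget.
*/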
2645 #ifndef VMA_STATS_STRING_ENABLED
2646 #define VMA_STATS_STRING_ENABLED 1
2647 #endif
2648 
2649 #if VMA_STATS_STRING_ENABLED
2650 
2652 
2654 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
2655  VmaAllocator VMA_NOT_NULL allocator,
2656  char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
2657  VkBool32 detailedMap);
2658 
2659 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
2660  VmaAllocator VMA_NOT_NULL allocator,
2661  char* VMA_NULLABLE pStatsString);
2662 
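/*
Example usage (a sketch; passing VK_TRUE requests a detailed map of all
allocations, which makes the string large):

    char* statsString = nullptr;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE);
    // ... write the JSON string to a file or log ...
    vmaFreeStatsString(allocator, statsString);
*/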
2663 #endif // #if VMA_STATS_STRING_ENABLED
2664 
2673 VK_DEFINE_HANDLE(VmaPool)
2674 
2675 typedef enum VmaMemoryUsage
2676 {
2683  VMA_MEMORY_USAGE_UNKNOWN = 0,
2695  VMA_MEMORY_USAGE_GPU_ONLY = 1,
2705  VMA_MEMORY_USAGE_CPU_ONLY = 2,
2712  VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
2721  VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
2727  VMA_MEMORY_USAGE_CPU_COPY = 5,
2737  VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
2738 
2739  VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
2740 } VmaMemoryUsage;
2741 
2852 typedef VkFlags VmaAllocationCreateFlags;
2853 
2854 typedef struct VmaAllocationCreateInfo
2855 {
2857  VmaAllocationCreateFlags flags;
2863  VmaMemoryUsage usage;
2868  VkMemoryPropertyFlags requiredFlags;
2873  VkMemoryPropertyFlags preferredFlags;
2881  uint32_t memoryTypeBits;
2887  VmaPool VMA_NULLABLE pool;
2894  void* VMA_NULLABLE pUserData;
2895 } VmaAllocationCreateInfo;
2896 
2913 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
2914  VmaAllocator VMA_NOT_NULL allocator,
2915  uint32_t memoryTypeBits,
2916  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2917  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2918 
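/*
Example usage (a sketch; memoryTypeBits = UINT32_MAX means no restriction on
acceptable memory types):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/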
2931 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
2932  VmaAllocator VMA_NOT_NULL allocator,
2933  const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
2934  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2935  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2936 
2949 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
2950  VmaAllocator VMA_NOT_NULL allocator,
2951  const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
2952  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
2953  uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
2954 
2955 typedef enum VmaPoolCreateFlagBits {
2974  VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
2991  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
3002  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
3006  VMA_POOL_CREATE_ALGORITHM_MASK =
3007  VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
3008  VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
3010  VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
3011 } VmaPoolCreateFlagBits;
3012 typedef VkFlags VmaPoolCreateFlags;
3013 
3016 typedef struct VmaPoolCreateInfo {
3019  uint32_t memoryTypeIndex;
3022  VmaPoolCreateFlags flags;
3031  VkDeviceSize blockSize;
3037  size_t minBlockCount;
3044  size_t maxBlockCount;
3058  uint32_t frameInUseCount;
3059 } VmaPoolCreateInfo;
3060 
3063 typedef struct VmaPoolStats {
3066  VkDeviceSize size;
3069  VkDeviceSize unusedSize;
3073  size_t allocationCount;
3077  size_t unusedRangeCount;
3082  VkDeviceSize unusedRangeSizeMax;
3085  size_t blockCount;
3086 } VmaPoolStats;
3087 
3094 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
3095  VmaAllocator VMA_NOT_NULL allocator,
3096  const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
3097  VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);
3098 
3101 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
3102  VmaAllocator VMA_NOT_NULL allocator,
3103  VmaPool VMA_NULLABLE pool);
3104 
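/*
Example usage (a sketch; the buffer parameters and block size are illustrative):

    // Find a memory type suitable for the kind of resources the pool will hold.
    VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    exampleBufCreateInfo.size = 1024;
    exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;

    uint32_t memTypeIndex;
    vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024;

    VmaPool pool;
    vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocate from the pool by setting VmaAllocationCreateInfo::pool, then finally:
    vmaDestroyPool(allocator, pool);
*/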
3111 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
3112  VmaAllocator VMA_NOT_NULL allocator,
3113  VmaPool VMA_NOT_NULL pool,
3114  VmaPoolStats* VMA_NOT_NULL pPoolStats);
3115 
3122 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
3123  VmaAllocator VMA_NOT_NULL allocator,
3124  VmaPool VMA_NOT_NULL pool,
3125  size_t* VMA_NULLABLE pLostAllocationCount);
3126 
3141 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);
3142 
3149 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
3150  VmaAllocator VMA_NOT_NULL allocator,
3151  VmaPool VMA_NOT_NULL pool,
3152  const char* VMA_NULLABLE * VMA_NOT_NULL ppName);
3153 
3159 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
3160  VmaAllocator VMA_NOT_NULL allocator,
3161  VmaPool VMA_NOT_NULL pool,
3162  const char* VMA_NULLABLE pName);
3163 
3188 VK_DEFINE_HANDLE(VmaAllocation)
3189 
3190 
3192 typedef struct VmaAllocationInfo {
3197  uint32_t memoryType;
3206  VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
3216  VkDeviceSize offset;
3227  VkDeviceSize size;
3236  void* VMA_NULLABLE pMappedData;
3241  void* VMA_NULLABLE pUserData;
3242 } VmaAllocationInfo;
3243 
3254 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
3255  VmaAllocator VMA_NOT_NULL allocator,
3256  const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
3257  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3258  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3259  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3260 
3280 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
3281  VmaAllocator VMA_NOT_NULL allocator,
3282  const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
3283  const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
3284  size_t allocationCount,
3285  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
3286  VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
3287 
3294 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
3295  VmaAllocator VMA_NOT_NULL allocator,
3296  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
3297  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3298  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3299  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3300 
3302 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
3303  VmaAllocator VMA_NOT_NULL allocator,
3304  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
3305  const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
3306  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3307  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3308 
3313 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
3314  VmaAllocator VMA_NOT_NULL allocator,
3315  const VmaAllocation VMA_NULLABLE allocation);
3316 
3327 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
3328  VmaAllocator VMA_NOT_NULL allocator,
3329  size_t allocationCount,
3330  const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
3331 
3339 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
3340  VmaAllocator VMA_NOT_NULL allocator,
3341  VmaAllocation VMA_NOT_NULL allocation,
3342  VkDeviceSize newSize);
3343 
3360 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
3361  VmaAllocator VMA_NOT_NULL allocator,
3362  VmaAllocation VMA_NOT_NULL allocation,
3363  VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
3364 
3379 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
3380  VmaAllocator VMA_NOT_NULL allocator,
3381  VmaAllocation VMA_NOT_NULL allocation);
3382 
3396 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
3397  VmaAllocator VMA_NOT_NULL allocator,
3398  VmaAllocation VMA_NOT_NULL allocation,
3399  void* VMA_NULLABLE pUserData);
3400 
3411 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
3412  VmaAllocator VMA_NOT_NULL allocator,
3413  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);
3414 
3453 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
3454  VmaAllocator VMA_NOT_NULL allocator,
3455  VmaAllocation VMA_NOT_NULL allocation,
3456  void* VMA_NULLABLE * VMA_NOT_NULL ppData);
3457 
3466 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
3467  VmaAllocator VMA_NOT_NULL allocator,
3468  VmaAllocation VMA_NOT_NULL allocation);
3469 
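/*
Example usage (a sketch; assumes `allocation` was created in a host-visible
memory type):

    const float constants[] = { 1.0f, 2.0f, 3.0f };
    void* mappedData = nullptr;
    if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
    {
        memcpy(mappedData, constants, sizeof(constants));
        vmaUnmapMemory(allocator, allocation);
    }
*/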
3491 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
3492  VmaAllocator VMA_NOT_NULL allocator,
3493  VmaAllocation VMA_NOT_NULL allocation,
3494  VkDeviceSize offset,
3495  VkDeviceSize size);
3496 
3518 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
3519  VmaAllocator VMA_NOT_NULL allocator,
3520  VmaAllocation VMA_NOT_NULL allocation,
3521  VkDeviceSize offset,
3522  VkDeviceSize size);
3523 
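/*
Example usage (a sketch, continuing the mapping example above; the flush is
required only when the memory type is not HOST_COHERENT; VK_WHOLE_SIZE covers
the entire allocation):

    memcpy(mappedData, constants, sizeof(constants));
    vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
*/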
3538 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
3539  VmaAllocator VMA_NOT_NULL allocator,
3540  uint32_t allocationCount,
3541  const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
3542  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
3543  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
3544 
3559 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
3560  VmaAllocator VMA_NOT_NULL allocator,
3561  uint32_t allocationCount,
3562  const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
3563  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
3564  const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
3565 
3582 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);
3583 
3590 VK_DEFINE_HANDLE(VmaDefragmentationContext)
3591 
3592 typedef enum VmaDefragmentationFlagBits {
3594  VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
3595  VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
3596 } VmaDefragmentationFlagBits;
3597 typedef VkFlags VmaDefragmentationFlags;
3598 
3603 typedef struct VmaDefragmentationInfo2 {
3607  VmaDefragmentationFlags flags;
3610  uint32_t allocationCount;
3618  const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
3624  VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
3627  uint32_t poolCount;
3643  const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
3648  VkDeviceSize maxCpuBytesToMove;
3653  uint32_t maxCpuAllocationsToMove;
3658  VkDeviceSize maxGpuBytesToMove;
3663  uint32_t maxGpuAllocationsToMove;
3672  VkCommandBuffer VMA_NULLABLE commandBuffer;
3673 } VmaDefragmentationInfo2;
3674 
3675 typedef struct VmaDefragmentationPassMoveInfo {
3676  VmaAllocation VMA_NOT_NULL allocation;
3677  VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
3678  VkDeviceSize offset;
3679 } VmaDefragmentationPassMoveInfo;
3680 
3685 typedef struct VmaDefragmentationPassInfo {
3686  uint32_t moveCount;
3687  VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
3688 } VmaDefragmentationPassInfo;
3689 
3694 typedef struct VmaDefragmentationInfo {
3699  VkDeviceSize maxBytesToMove;
3704  uint32_t maxAllocationsToMove;
3705 } VmaDefragmentationInfo;
3706 
3708 typedef struct VmaDefragmentationStats {
3710  VkDeviceSize bytesMoved;
3712  VkDeviceSize bytesFreed;
3714  uint32_t allocationsMoved;
3716  uint32_t deviceMemoryBlocksFreed;
3717 } VmaDefragmentationStats;
3718 
3748 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
3749  VmaAllocator VMA_NOT_NULL allocator,
3750  const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
3751  VmaDefragmentationStats* VMA_NULLABLE pStats,
3752  VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);
3753 
3759 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
3760  VmaAllocator VMA_NOT_NULL allocator,
3761  VmaDefragmentationContext VMA_NULLABLE context);
3762 
3763 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
3764  VmaAllocator VMA_NOT_NULL allocator,
3765  VmaDefragmentationContext VMA_NULLABLE context,
3766  VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
3767 );
3768 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
3769  VmaAllocator VMA_NOT_NULL allocator,
3770  VmaDefragmentationContext VMA_NULLABLE context
3771 );
3772 
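/*
Example usage (a sketch of CPU-side defragmentation; `allocations` and
`allocationsChanged` are application-provided arrays of length `allocationCount`):

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocationCount;
    defragInfo.pAllocations = allocations;
    defragInfo.pAllocationsChanged = allocationsChanged;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);

    // For every i where allocationsChanged[i] == VK_TRUE: destroy the old buffer or
    // image, create a new one, bind it with vmaBindBufferMemory()/vmaBindImageMemory(),
    // and re-record any command buffers that referenced it.
*/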
3813 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
3814  VmaAllocator VMA_NOT_NULL allocator,
3815  const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
3816  size_t allocationCount,
3817  VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
3818  const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
3819  VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);
3820 
3833 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
3834  VmaAllocator VMA_NOT_NULL allocator,
3835  VmaAllocation VMA_NOT_NULL allocation,
3836  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
3837 
3848 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
3849  VmaAllocator VMA_NOT_NULL allocator,
3850  VmaAllocation VMA_NOT_NULL allocation,
3851  VkDeviceSize allocationLocalOffset,
3852  VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
3853  const void* VMA_NULLABLE pNext);
3854 
3867 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
3868  VmaAllocator VMA_NOT_NULL allocator,
3869  VmaAllocation VMA_NOT_NULL allocation,
3870  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
3871 
3882 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
3883  VmaAllocator VMA_NOT_NULL allocator,
3884  VmaAllocation VMA_NOT_NULL allocation,
3885  VkDeviceSize allocationLocalOffset,
3886  VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
3887  const void* VMA_NULLABLE pNext);
3888 
3919 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
3920  VmaAllocator VMA_NOT_NULL allocator,
3921  const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
3922  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
3923  VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
3924  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3925  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3926 
3938 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
3939  VmaAllocator VMA_NOT_NULL allocator,
3940  VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
3941  VmaAllocation VMA_NULLABLE allocation);
3942 
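/*
Example usage (a sketch; the size and usage flags are illustrative):

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buffer;
    VmaAllocation allocation;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        &buffer, &allocation, nullptr);
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buffer, allocation);
*/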
3944 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
3945  VmaAllocator VMA_NOT_NULL allocator,
3946  const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
3947  const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
3948  VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
3949  VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
3950  VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
3951 
3963 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
3964  VmaAllocator VMA_NOT_NULL allocator,
3965  VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
3966  VmaAllocation VMA_NULLABLE allocation);
3967 
3968 #ifdef __cplusplus
3969 }
3970 #endif
3971 
3972 #endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
3973 
3974 // For Visual Studio IntelliSense.
3975 #if defined(__cplusplus) && defined(__INTELLISENSE__)
3976 #define VMA_IMPLEMENTATION
3977 #endif
3978 
3979 #ifdef VMA_IMPLEMENTATION
3980 #undef VMA_IMPLEMENTATION
3981 
3982 #include <cstdint>
3983 #include <cstdlib>
3984 #include <cstring>
3985 #include <utility>
3986 
3987 #if VMA_RECORDING_ENABLED
3988  #include <chrono>
3989  #if defined(_WIN32)
3990  #include <windows.h>
3991  #else
3992  #include <sstream>
3993  #include <thread>
3994  #endif
3995 #endif
3996 
3997 /*******************************************************************************
3998 CONFIGURATION SECTION
3999 
4000 Define some of these macros before each #include of this header, or change them
4001 here, if you need behavior other than the default for your environment.
4002 */
4003 
4004 /*
4005 Define this macro to 1 to make the library fetch pointers to Vulkan functions
4006 internally, like:
4007 
4008  vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
4009 */
4010 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
4011  #define VMA_STATIC_VULKAN_FUNCTIONS 1
4012 #endif
4013 
4014 /*
4015 Define this macro to 1 to make the library fetch pointers to Vulkan functions
4016 internally, like:
4017 
4018  vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
4019 */
4020 #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
4021  #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
4022  #if defined(VK_NO_PROTOTYPES)
4023  extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
4024  extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
4025  #endif
4026 #endif
4027 
4028 // Define this macro to 1 to make the library use STL containers instead of its own implementation.
4029 //#define VMA_USE_STL_CONTAINERS 1
4030 
4031 /* Set this macro to 1 to make the library include and use STL containers:
4032 std::pair, std::vector, std::list, std::unordered_map.
4033 
4034 Set it to 0 or leave it undefined to make the library use its own
4035 implementation of the containers.
4036 */
4037 #if VMA_USE_STL_CONTAINERS
4038  #define VMA_USE_STL_VECTOR 1
4039  #define VMA_USE_STL_UNORDERED_MAP 1
4040  #define VMA_USE_STL_LIST 1
4041 #endif
4042 
4043 #ifndef VMA_USE_STL_SHARED_MUTEX
4044  // Compiler conforms to C++17.
4045  #if __cplusplus >= 201703L
4046  #define VMA_USE_STL_SHARED_MUTEX 1
4047  // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
4048  // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
4049  // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
4050  #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
4051  #define VMA_USE_STL_SHARED_MUTEX 1
4052  #else
4053  #define VMA_USE_STL_SHARED_MUTEX 0
4054  #endif
4055 #endif
4056 
4057 /*
4058 THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
4059 Library has its own container implementation.
4060 */
4061 #if VMA_USE_STL_VECTOR
4062  #include <vector>
4063 #endif
4064 
4065 #if VMA_USE_STL_UNORDERED_MAP
4066  #include <unordered_map>
4067 #endif
4068 
4069 #if VMA_USE_STL_LIST
4070  #include <list>
4071 #endif
4072 
4073 /*
4074 Following headers are used in this CONFIGURATION section only, so feel free to
4075 remove them if not needed.
4076 */
4077 #include <cassert> // for assert
4078 #include <algorithm> // for min, max
4079 #include <mutex>
4080 
4081 #ifndef VMA_NULL
4082  // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
4083  #define VMA_NULL nullptr
4084 #endif
4085 
4086 #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
4087 #include <cstdlib>
4088 static void* vma_aligned_alloc(size_t alignment, size_t size)
4089 {
4090  // alignment must be >= sizeof(void*)
4091  if(alignment < sizeof(void*))
4092  {
4093  alignment = sizeof(void*);
4094  }
4095 
4096  return memalign(alignment, size);
4097 }
4098 #elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
4099 #include <cstdlib>
4100 
4101 #if defined(__APPLE__)
4102 #include <AvailabilityMacros.h>
4103 #endif
4104 
4105 static void* vma_aligned_alloc(size_t alignment, size_t size)
4106 {
4107 #if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
4108 #if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
4109  // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
4110  // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
4111  // MAC_OS_X_VERSION_10_16), even though the function is marked
4112  // available for 10.15. That's why the preprocessor checks for 10.16 but
4113  // the __builtin_available checks for 10.15.
4114  // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
4115  if (__builtin_available(macOS 10.15, iOS 13, *))
4116  return aligned_alloc(alignment, size);
4117 #endif
4118 #endif
4119  // alignment must be >= sizeof(void*)
4120  if(alignment < sizeof(void*))
4121  {
4122  alignment = sizeof(void*);
4123  }
4124 
4125  void *pointer;
4126  if(posix_memalign(&pointer, alignment, size) == 0)
4127  return pointer;
4128  return VMA_NULL;
4129 }
4130 #elif defined(_WIN32)
4131 static void* vma_aligned_alloc(size_t alignment, size_t size)
4132 {
4133  return _aligned_malloc(size, alignment);
4134 }
4135 #else
4136 static void* vma_aligned_alloc(size_t alignment, size_t size)
4137 {
4138  return aligned_alloc(alignment, size);
4139 }
4140 #endif
4141 
4142 #if defined(_WIN32)
4143 static void vma_aligned_free(void* ptr)
4144 {
4145  _aligned_free(ptr);
4146 }
4147 #else
4148 static void vma_aligned_free(void* ptr)
4149 {
4150  free(ptr);
4151 }
4152 #endif
4153 
4154 // If your compiler is not compatible with C++11 and the definition of the
4155 // aligned_alloc() function is missing, uncommenting the following line may help:
4156 
4157 //#include <malloc.h>
4158 
4159 // Normal assert to check for programmer's errors, especially in Debug configuration.
4160 #ifndef VMA_ASSERT
4161  #ifdef NDEBUG
4162  #define VMA_ASSERT(expr)
4163  #else
4164  #define VMA_ASSERT(expr) assert(expr)
4165  #endif
4166 #endif
4167 
4168 // Assert that will be called very often, like inside data structures, e.g. operator[].
4169 // Making it non-empty can make the program slow.
4170 #ifndef VMA_HEAVY_ASSERT
4171  #ifdef NDEBUG
4172  #define VMA_HEAVY_ASSERT(expr)
4173  #else
4174  #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
4175  #endif
4176 #endif
4177 
4178 #ifndef VMA_ALIGN_OF
4179  #define VMA_ALIGN_OF(type) (__alignof(type))
4180 #endif
4181 
4182 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
4183  #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
4184 #endif
4185 
4186 #ifndef VMA_SYSTEM_ALIGNED_FREE
4187  // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
4188  #if defined(VMA_SYSTEM_FREE)
4189  #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
4190  #else
4191  #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
4192  #endif
4193 #endif
4194 
4195 #ifndef VMA_MIN
4196  #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
4197 #endif
4198 
4199 #ifndef VMA_MAX
4200  #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
4201 #endif
4202 
4203 #ifndef VMA_SWAP
4204  #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
4205 #endif
4206 
4207 #ifndef VMA_SORT
4208  #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
4209 #endif
4210 
4211 #ifndef VMA_DEBUG_LOG
4212  #define VMA_DEBUG_LOG(format, ...)
4213  /*
4214  #define VMA_DEBUG_LOG(format, ...) do { \
4215  printf(format, __VA_ARGS__); \
4216  printf("\n"); \
4217  } while(false)
4218  */
4219 #endif
4220 
4221 // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
4222 #if VMA_STATS_STRING_ENABLED
4223  static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
4224  {
4225  snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
4226  }
4227  static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
4228  {
4229  snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
4230  }
4231  static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
4232  {
4233  snprintf(outStr, strLen, "%p", ptr);
4234  }
4235 #endif
4236 
4237 #ifndef VMA_MUTEX
4238  class VmaMutex
4239  {
4240  public:
4241  void Lock() { m_Mutex.lock(); }
4242  void Unlock() { m_Mutex.unlock(); }
4243  bool TryLock() { return m_Mutex.try_lock(); }
4244  private:
4245  std::mutex m_Mutex;
4246  };
4247  #define VMA_MUTEX VmaMutex
4248 #endif
4249 
4250 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
4251 #ifndef VMA_RW_MUTEX
4252  #if VMA_USE_STL_SHARED_MUTEX
4253  // Use std::shared_mutex from C++17.
4254  #include <shared_mutex>
4255  class VmaRWMutex
4256  {
4257  public:
4258  void LockRead() { m_Mutex.lock_shared(); }
4259  void UnlockRead() { m_Mutex.unlock_shared(); }
4260  bool TryLockRead() { return m_Mutex.try_lock_shared(); }
4261  void LockWrite() { m_Mutex.lock(); }
4262  void UnlockWrite() { m_Mutex.unlock(); }
4263  bool TryLockWrite() { return m_Mutex.try_lock(); }
4264  private:
4265  std::shared_mutex m_Mutex;
4266  };
4267  #define VMA_RW_MUTEX VmaRWMutex
4268  #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
4269  // Use SRWLOCK from WinAPI.
4270  // Minimum supported client = Windows Vista, server = Windows Server 2008.
4271  class VmaRWMutex
4272  {
4273  public:
4274  VmaRWMutex() { InitializeSRWLock(&m_Lock); }
4275  void LockRead() { AcquireSRWLockShared(&m_Lock); }
4276  void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
4277  bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
4278  void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
4279  void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
4280  bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
4281  private:
4282  SRWLOCK m_Lock;
4283  };
4284  #define VMA_RW_MUTEX VmaRWMutex
4285  #else
4286  // Less efficient fallback: Use normal mutex.
4287  class VmaRWMutex
4288  {
4289  public:
4290  void LockRead() { m_Mutex.Lock(); }
4291  void UnlockRead() { m_Mutex.Unlock(); }
4292  bool TryLockRead() { return m_Mutex.TryLock(); }
4293  void LockWrite() { m_Mutex.Lock(); }
4294  void UnlockWrite() { m_Mutex.Unlock(); }
4295  bool TryLockWrite() { return m_Mutex.TryLock(); }
4296  private:
4297  VMA_MUTEX m_Mutex;
4298  };
4299  #define VMA_RW_MUTEX VmaRWMutex
4300  #endif // #if VMA_USE_STL_SHARED_MUTEX
4301 #endif // #ifndef VMA_RW_MUTEX
4302 
4303 /*
4304 If providing your own implementation, you need to implement a subset of std::atomic.
4305 */
4306 #ifndef VMA_ATOMIC_UINT32
4307  #include <atomic>
4308  #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
4309 #endif
4310 
4311 #ifndef VMA_ATOMIC_UINT64
4312  #include <atomic>
4313  #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
4314 #endif
4315 
4316 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
4317 
4321  #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
4322 #endif
4323 
4324 #ifndef VMA_DEBUG_ALIGNMENT
4325 
4329  #define VMA_DEBUG_ALIGNMENT (1)
4330 #endif
4331 
4332 #ifndef VMA_DEBUG_MARGIN
4333 
4337  #define VMA_DEBUG_MARGIN (0)
4338 #endif
4339 
4340 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
4341 
4345  #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
4346 #endif
4347 
4348 #ifndef VMA_DEBUG_DETECT_CORRUPTION
4349 
4354  #define VMA_DEBUG_DETECT_CORRUPTION (0)
4355 #endif
4356 
4357 #ifndef VMA_DEBUG_GLOBAL_MUTEX
4358 
4362  #define VMA_DEBUG_GLOBAL_MUTEX (0)
4363 #endif
4364 
4365 #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
4366 
4370  #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
4371 #endif
4372 
4373 #ifndef VMA_SMALL_HEAP_MAX_SIZE
4374  #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
4376 #endif
4377 
4378 #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
4379  #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
4381 #endif
4382 
4383 #ifndef VMA_CLASS_NO_COPY
4384  #define VMA_CLASS_NO_COPY(className) \
4385  private: \
4386  className(const className&) = delete; \
4387  className& operator=(const className&) = delete;
4388 #endif
4389 
4390 static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
4391 
4392 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
4393 static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
4394 
4395 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
4396 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
4397 
4398 /*******************************************************************************
4399 END OF CONFIGURATION
4400 */
4401 
4402 // # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
4403 
4404 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
4405 static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
4406 static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
4407 
4408 static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
4409 
4410 static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
4411  VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
4412 
4413 // Returns number of bits set to 1 in (v).
4414 static inline uint32_t VmaCountBitsSet(uint32_t v)
4415 {
4416  uint32_t c = v - ((v >> 1) & 0x55555555);
4417  c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
4418  c = ((c >> 4) + c) & 0x0F0F0F0F;
4419  c = ((c >> 8) + c) & 0x00FF00FF;
4420  c = ((c >> 16) + c) & 0x0000FFFF;
4421  return c;
4422 }
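// Example: VmaCountBitsSet(0xB) == 3 (binary 1011).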
4423 
4424 /*
4425 Returns true if given number is a power of two.
4426 T must be an unsigned integer, or a signed integer that is always nonnegative.
4427 For 0 returns true.
4428 */
4429 template <typename T>
4430 inline bool VmaIsPow2(T x)
4431 {
4432  return (x & (x-1)) == 0;
4433 }
4434 
4435 // Aligns given value up to the nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
4436 // Use types like uint32_t, uint64_t as T.
4437 template <typename T>
4438 static inline T VmaAlignUp(T val, T alignment)
4439 {
4440  VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
4441  return (val + alignment - 1) & ~(alignment - 1);
4442 }
4443 // Aligns given value down to the nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
4444 // Use types like uint32_t, uint64_t as T.
4445 template <typename T>
4446 static inline T VmaAlignDown(T val, T alignment)
4447 {
4448  VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
4449  return val & ~(alignment - 1);
4450 }
4451 
4452 // Division with mathematical rounding to nearest number.
4453 template <typename T>
4454 static inline T VmaRoundDiv(T x, T y)
4455 {
4456  return (x + (y / (T)2)) / y;
4457 }
4458 
4459 // Returns the smallest power of 2 greater than or equal to v.
4460 static inline uint32_t VmaNextPow2(uint32_t v)
4461 {
4462  v--;
4463  v |= v >> 1;
4464  v |= v >> 2;
4465  v |= v >> 4;
4466  v |= v >> 8;
4467  v |= v >> 16;
4468  v++;
4469  return v;
4470 }
4471 static inline uint64_t VmaNextPow2(uint64_t v)
4472 {
4473  v--;
4474  v |= v >> 1;
4475  v |= v >> 2;
4476  v |= v >> 4;
4477  v |= v >> 8;
4478  v |= v >> 16;
4479  v |= v >> 32;
4480  v++;
4481  return v;
4482 }
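// Examples: VmaNextPow2(17u) == 32, VmaNextPow2(32u) == 32.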
4483 
4484 // Returns the largest power of 2 less than or equal to v.
4485 static inline uint32_t VmaPrevPow2(uint32_t v)
4486 {
4487  v |= v >> 1;
4488  v |= v >> 2;
4489  v |= v >> 4;
4490  v |= v >> 8;
4491  v |= v >> 16;
4492  v = v ^ (v >> 1);
4493  return v;
4494 }
4495 static inline uint64_t VmaPrevPow2(uint64_t v)
4496 {
4497  v |= v >> 1;
4498  v |= v >> 2;
4499  v |= v >> 4;
4500  v |= v >> 8;
4501  v |= v >> 16;
4502  v |= v >> 32;
4503  v = v ^ (v >> 1);
4504  return v;
4505 }
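// Examples: VmaPrevPow2(17u) == 16, VmaPrevPow2(32u) == 32.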
4506 
4507 static inline bool VmaStrIsEmpty(const char* pStr)
4508 {
4509  return pStr == VMA_NULL || *pStr == '\0';
4510 }
4511 
4512 #if VMA_STATS_STRING_ENABLED
4513 
4514 static const char* VmaAlgorithmToStr(uint32_t algorithm)
4515 {
4516  switch(algorithm)
4517  {
4518  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
4519  return "Linear";
4520  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
4521  return "Buddy";
4522  case 0:
4523  return "Default";
4524  default:
4525  VMA_ASSERT(0);
4526  return "";
4527  }
4528 }
4529 
4530 #endif // #if VMA_STATS_STRING_ENABLED
4531 
4532 #ifndef VMA_SORT
4533 
4534 template<typename Iterator, typename Compare>
4535 Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
4536 {
4537  Iterator centerValue = end; --centerValue;
4538  Iterator insertIndex = beg;
4539  for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
4540  {
4541  if(cmp(*memTypeIndex, *centerValue))
4542  {
4543  if(insertIndex != memTypeIndex)
4544  {
4545  VMA_SWAP(*memTypeIndex, *insertIndex);
4546  }
4547  ++insertIndex;
4548  }
4549  }
4550  if(insertIndex != centerValue)
4551  {
4552  VMA_SWAP(*insertIndex, *centerValue);
4553  }
4554  return insertIndex;
4555 }
4556 
4557 template<typename Iterator, typename Compare>
4558 void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
4559 {
4560  if(beg < end)
4561  {
4562  Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
4563  VmaQuickSort<Iterator, Compare>(beg, it, cmp);
4564  VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
4565  }
4566 }
4567 
4568 #define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
4569 
4570 #endif // #ifndef VMA_SORT
4571 
4572 /*
4573 Returns true if two memory blocks occupy overlapping pages.
4574 ResourceA must be at a lower memory offset than ResourceB.
4575 
4576 Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
4577 chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
4578 */
4579 static inline bool VmaBlocksOnSamePage(
4580  VkDeviceSize resourceAOffset,
4581  VkDeviceSize resourceASize,
4582  VkDeviceSize resourceBOffset,
4583  VkDeviceSize pageSize)
4584 {
4585  VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
4586  VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
4587  VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
4588  VkDeviceSize resourceBStart = resourceBOffset;
4589  VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
4590  return resourceAEndPage == resourceBStartPage;
4591 }
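// Example: with pageSize = 4096, a resource spanning offsets [0, 4001) ends on page 0
// and a resource starting at offset 4090 also begins on page 0, so this returns true.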
4592 
4593 enum VmaSuballocationType
4594 {
4595  VMA_SUBALLOCATION_TYPE_FREE = 0,
4596  VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
4597  VMA_SUBALLOCATION_TYPE_BUFFER = 2,
4598  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
4599  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
4600  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
4601  VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
4602 };
4603 
4604 /*
4605 Returns true if given suballocation types could conflict and must respect
4606 VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
4607 or linear image and another one is optimal image. If type is unknown, behave
4608 conservatively.
4609 */
4610 static inline bool VmaIsBufferImageGranularityConflict(
4611  VmaSuballocationType suballocType1,
4612  VmaSuballocationType suballocType2)
4613 {
4614  if(suballocType1 > suballocType2)
4615  {
4616  VMA_SWAP(suballocType1, suballocType2);
4617  }
4618 
4619  switch(suballocType1)
4620  {
4621  case VMA_SUBALLOCATION_TYPE_FREE:
4622  return false;
4623  case VMA_SUBALLOCATION_TYPE_UNKNOWN:
4624  return true;
4625  case VMA_SUBALLOCATION_TYPE_BUFFER:
4626  return
4627  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4628  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4629  case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
4630  return
4631  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
4632  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
4633  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4634  case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
4635  return
4636  suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
4637  case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
4638  return false;
4639  default:
4640  VMA_ASSERT(0);
4641  return true;
4642  }
4643 }
4644 
4645 static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
4646 {
4647 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4648  uint32_t* pDst = (uint32_t*)((char*)pData + offset);
4649  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4650  for(size_t i = 0; i < numberCount; ++i, ++pDst)
4651  {
4652  *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
4653  }
4654 #else
4655  // no-op
4656 #endif
4657 }
4658 
4659 static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
4660 {
4661 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
4662  const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
4663  const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
4664  for(size_t i = 0; i < numberCount; ++i, ++pSrc)
4665  {
4666  if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
4667  {
4668  return false;
4669  }
4670  }
4671 #endif
4672  return true;
4673 }
4674 
4675 /*
4676 Fills structure with parameters of an example buffer to be used for transfers
4677 during GPU memory defragmentation.
4678 */
4679 static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
4680 {
4681  memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
4682  outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
4683  outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
4684  outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
4685 }
4686 
4687 // Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
4688 struct VmaMutexLock
4689 {
4690  VMA_CLASS_NO_COPY(VmaMutexLock)
4691 public:
4692  VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
4693  m_pMutex(useMutex ? &mutex : VMA_NULL)
4694  { if(m_pMutex) { m_pMutex->Lock(); } }
4695  ~VmaMutexLock()
4696  { if(m_pMutex) { m_pMutex->Unlock(); } }
4697 private:
4698  VMA_MUTEX* m_pMutex;
4699 };
4700 
4701 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
4702 struct VmaMutexLockRead
4703 {
4704  VMA_CLASS_NO_COPY(VmaMutexLockRead)
4705 public:
4706  VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
4707  m_pMutex(useMutex ? &mutex : VMA_NULL)
4708  { if(m_pMutex) { m_pMutex->LockRead(); } }
4709  ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
4710 private:
4711  VMA_RW_MUTEX* m_pMutex;
4712 };
4713 
4714 // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
4715 struct VmaMutexLockWrite
4716 {
4717  VMA_CLASS_NO_COPY(VmaMutexLockWrite)
4718 public:
4719  VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
4720  m_pMutex(useMutex ? &mutex : VMA_NULL)
4721  { if(m_pMutex) { m_pMutex->LockWrite(); } }
4722  ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
4723 private:
4724  VMA_RW_MUTEX* m_pMutex;
4725 };
4726 
4727 #if VMA_DEBUG_GLOBAL_MUTEX
4728  static VMA_MUTEX gDebugGlobalMutex;
4729  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
4730 #else
4731  #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
4732 #endif
4733 
4734 // Minimum size of a free suballocation to register it in the free suballocation collection.
4735 static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
4736 
4737 /*
4738 Performs a binary search and returns an iterator to the first element that is
4739 greater than or equal to (key), according to comparison (cmp).
4740 
4741 Cmp should return true if the first argument is less than the second argument.
4742 
4743 The returned value is the found element, if present in the collection, or the
4744 place where a new element with value (key) should be inserted.
4745 */
4746 template <typename CmpLess, typename IterT, typename KeyT>
4747 static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
4748 {
4749  size_t down = 0, up = (end - beg);
4750  while(down < up)
4751  {
4752  const size_t mid = (down + up) / 2;
4753  if(cmp(*(beg+mid), key))
4754  {
4755  down = mid + 1;
4756  }
4757  else
4758  {
4759  up = mid;
4760  }
4761  }
4762  return beg + down;
4763 }
4764 
4765 template<typename CmpLess, typename IterT, typename KeyT>
4766 IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
4767 {
4768  IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
4769  beg, end, value, cmp);
4770  if(it == end ||
4771  (!cmp(*it, value) && !cmp(value, *it)))
4772  {
4773  return it;
4774  }
4775  return end;
4776 }
4777 
4778 /*
4779 Returns true if all pointers in the array are non-null and unique.
4780 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
4781 T must be pointer type, e.g. VmaAllocation, VmaPool.
4782 */
4783 template<typename T>
4784 static bool VmaValidatePointerArray(uint32_t count, const T* arr)
4785 {
4786  for(uint32_t i = 0; i < count; ++i)
4787  {
4788  const T iPtr = arr[i];
4789  if(iPtr == VMA_NULL)
4790  {
4791  return false;
4792  }
4793  for(uint32_t j = i + 1; j < count; ++j)
4794  {
4795  if(iPtr == arr[j])
4796  {
4797  return false;
4798  }
4799  }
4800  }
4801  return true;
4802 }
4803 
4804 template<typename MainT, typename NewT>
4805 static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
4806 {
4807  newStruct->pNext = mainStruct->pNext;
4808  mainStruct->pNext = newStruct;
4809 }
4810 
4812 // Memory allocation
4813 
4814 static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
4815 {
4816  void* result = VMA_NULL;
4817  if((pAllocationCallbacks != VMA_NULL) &&
4818  (pAllocationCallbacks->pfnAllocation != VMA_NULL))
4819  {
4820  result = (*pAllocationCallbacks->pfnAllocation)(
4821  pAllocationCallbacks->pUserData,
4822  size,
4823  alignment,
4824  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4825  }
4826  else
4827  {
4828  result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
4829  }
4830  VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
4831  return result;
4832 }
4833 
4834 static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
4835 {
4836  if((pAllocationCallbacks != VMA_NULL) &&
4837  (pAllocationCallbacks->pfnFree != VMA_NULL))
4838  {
4839  (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
4840  }
4841  else
4842  {
4843  VMA_SYSTEM_ALIGNED_FREE(ptr);
4844  }
4845 }
4846 
4847 template<typename T>
4848 static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
4849 {
4850  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
4851 }
4852 
4853 template<typename T>
4854 static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
4855 {
4856  return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
4857 }
4858 
4859 #define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
4860 
4861 #define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
4862 
4863 template<typename T>
4864 static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
4865 {
4866  ptr->~T();
4867  VmaFree(pAllocationCallbacks, ptr);
4868 }
4869 
4870 template<typename T>
4871 static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
4872 {
4873  if(ptr != VMA_NULL)
4874  {
4875  for(size_t i = count; i--; )
4876  {
4877  ptr[i].~T();
4878  }
4879  VmaFree(pAllocationCallbacks, ptr);
4880  }
4881 }
4882 
4883 static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
4884 {
4885  if(srcStr != VMA_NULL)
4886  {
4887  const size_t len = strlen(srcStr);
4888  char* const result = vma_new_array(allocs, char, len + 1);
4889  memcpy(result, srcStr, len + 1);
4890  return result;
4891  }
4892  else
4893  {
4894  return VMA_NULL;
4895  }
4896 }
4897 
4898 static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
4899 {
4900  if(str != VMA_NULL)
4901  {
4902  const size_t len = strlen(str);
4903  vma_delete_array(allocs, str, len + 1);
4904  }
4905 }
4906 
4907 // STL-compatible allocator.
4908 template<typename T>
4909 class VmaStlAllocator
4910 {
4911 public:
4912  const VkAllocationCallbacks* const m_pCallbacks;
4913  typedef T value_type;
4914 
4915  VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
4916  template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
4917 
4918  T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
4919  void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
4920 
4921  template<typename U>
4922  bool operator==(const VmaStlAllocator<U>& rhs) const
4923  {
4924  return m_pCallbacks == rhs.m_pCallbacks;
4925  }
4926  template<typename U>
4927  bool operator!=(const VmaStlAllocator<U>& rhs) const
4928  {
4929  return m_pCallbacks != rhs.m_pCallbacks;
4930  }
4931 
4932  VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
4933 };
4934 
4935 #if VMA_USE_STL_VECTOR
4936 
4937 #define VmaVector std::vector
4938 
4939 template<typename T, typename allocatorT>
4940 static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
4941 {
4942  vec.insert(vec.begin() + index, item);
4943 }
4944 
4945 template<typename T, typename allocatorT>
4946 static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
4947 {
4948  vec.erase(vec.begin() + index);
4949 }
4950 
4951 #else // #if VMA_USE_STL_VECTOR
4952 
4953 /* Class with interface compatible with subset of std::vector.
4954 T must be POD because constructors and destructors are not called and memcpy is
4955 used for these objects. */
4956 template<typename T, typename AllocatorT>
4957 class VmaVector
4958 {
4959 public:
4960  typedef T value_type;
4961 
4962  VmaVector(const AllocatorT& allocator) :
4963  m_Allocator(allocator),
4964  m_pArray(VMA_NULL),
4965  m_Count(0),
4966  m_Capacity(0)
4967  {
4968  }
4969 
4970  VmaVector(size_t count, const AllocatorT& allocator) :
4971  m_Allocator(allocator),
4972  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4973  m_Count(count),
4974  m_Capacity(count)
4975  {
4976  }
4977 
4978  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4979  // value is unused.
4980  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4981  : VmaVector(count, allocator) {}
4982 
4983  VmaVector(const VmaVector<T, AllocatorT>& src) :
4984  m_Allocator(src.m_Allocator),
4985  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4986  m_Count(src.m_Count),
4987  m_Capacity(src.m_Count)
4988  {
4989  if(m_Count != 0)
4990  {
4991  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4992  }
4993  }
4994 
4995  ~VmaVector()
4996  {
4997  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4998  }
4999 
5000  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
5001  {
5002  if(&rhs != this)
5003  {
5004  resize(rhs.m_Count);
5005  if(m_Count != 0)
5006  {
5007  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
5008  }
5009  }
5010  return *this;
5011  }
5012 
5013  bool empty() const { return m_Count == 0; }
5014  size_t size() const { return m_Count; }
5015  T* data() { return m_pArray; }
5016  const T* data() const { return m_pArray; }
5017 
5018  T& operator[](size_t index)
5019  {
5020  VMA_HEAVY_ASSERT(index < m_Count);
5021  return m_pArray[index];
5022  }
5023  const T& operator[](size_t index) const
5024  {
5025  VMA_HEAVY_ASSERT(index < m_Count);
5026  return m_pArray[index];
5027  }
5028 
5029  T& front()
5030  {
5031  VMA_HEAVY_ASSERT(m_Count > 0);
5032  return m_pArray[0];
5033  }
5034  const T& front() const
5035  {
5036  VMA_HEAVY_ASSERT(m_Count > 0);
5037  return m_pArray[0];
5038  }
5039  T& back()
5040  {
5041  VMA_HEAVY_ASSERT(m_Count > 0);
5042  return m_pArray[m_Count - 1];
5043  }
5044  const T& back() const
5045  {
5046  VMA_HEAVY_ASSERT(m_Count > 0);
5047  return m_pArray[m_Count - 1];
5048  }
5049 
5050  void reserve(size_t newCapacity, bool freeMemory = false)
5051  {
5052  newCapacity = VMA_MAX(newCapacity, m_Count);
5053 
5054  if((newCapacity < m_Capacity) && !freeMemory)
5055  {
5056  newCapacity = m_Capacity;
5057  }
5058 
5059  if(newCapacity != m_Capacity)
5060  {
5061  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5062  if(m_Count != 0)
5063  {
5064  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5065  }
5066  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5067  m_Capacity = newCapacity;
5068  m_pArray = newArray;
5069  }
5070  }
5071 
5072  void resize(size_t newCount, bool freeMemory = false)
5073  {
5074  size_t newCapacity = m_Capacity;
5075  if(newCount > m_Capacity)
5076  {
5077  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
5078  }
5079  else if(freeMemory)
5080  {
5081  newCapacity = newCount;
5082  }
5083 
5084  if(newCapacity != m_Capacity)
5085  {
5086  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5087  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
5088  if(elementsToCopy != 0)
5089  {
5090  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
5091  }
5092  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5093  m_Capacity = newCapacity;
5094  m_pArray = newArray;
5095  }
5096 
5097  m_Count = newCount;
5098  }
5099 
5100  void clear(bool freeMemory = false)
5101  {
5102  resize(0, freeMemory);
5103  }
5104 
5105  void insert(size_t index, const T& src)
5106  {
5107  VMA_HEAVY_ASSERT(index <= m_Count);
5108  const size_t oldCount = size();
5109  resize(oldCount + 1);
5110  if(index < oldCount)
5111  {
5112  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
5113  }
5114  m_pArray[index] = src;
5115  }
5116 
5117  void remove(size_t index)
5118  {
5119  VMA_HEAVY_ASSERT(index < m_Count);
5120  const size_t oldCount = size();
5121  if(index < oldCount - 1)
5122  {
5123  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
5124  }
5125  resize(oldCount - 1);
5126  }
5127 
5128  void push_back(const T& src)
5129  {
5130  const size_t newIndex = size();
5131  resize(newIndex + 1);
5132  m_pArray[newIndex] = src;
5133  }
5134 
5135  void pop_back()
5136  {
5137  VMA_HEAVY_ASSERT(m_Count > 0);
5138  resize(size() - 1);
5139  }
5140 
5141  void push_front(const T& src)
5142  {
5143  insert(0, src);
5144  }
5145 
5146  void pop_front()
5147  {
5148  VMA_HEAVY_ASSERT(m_Count > 0);
5149  remove(0);
5150  }
5151 
5152  typedef T* iterator;
5153 
5154  iterator begin() { return m_pArray; }
5155  iterator end() { return m_pArray + m_Count; }
5156 
5157 private:
5158  AllocatorT m_Allocator;
5159  T* m_pArray;
5160  size_t m_Count;
5161  size_t m_Capacity;
5162 };
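
// Illustrative sketch (not compiled; ExampleVmaVector is hypothetical):
// VmaVector is only safe for trivially copyable element types, since growth
// and insertion use memcpy/memmove and no constructors or destructors ever run.
#if 0
void ExampleVmaVector(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<VkDeviceSize> alloc(pCallbacks);
    VmaVector<VkDeviceSize, VmaStlAllocator<VkDeviceSize> > sizes(alloc);
    sizes.push_back(256);
    sizes.push_back(64);
    sizes.insert(0, 1024); // Shifts the existing elements with memmove.
    // sizes now holds {1024, 256, 64}; sizes.front() == 1024.
}
#endif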
5163 
5164 template<typename T, typename allocatorT>
5165 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
5166 {
5167  vec.insert(index, item);
5168 }
5169 
5170 template<typename T, typename allocatorT>
5171 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
5172 {
5173  vec.remove(index);
5174 }
5175 
5176 #endif // #if VMA_USE_STL_VECTOR
5177 
5178 template<typename CmpLess, typename VectorT>
5179 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
5180 {
5181  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5182  vector.data(),
5183  vector.data() + vector.size(),
5184  value,
5185  CmpLess()) - vector.data();
5186  VmaVectorInsert(vector, indexToInsert, value);
5187  return indexToInsert;
5188 }
5189 
5190 template<typename CmpLess, typename VectorT>
5191 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
5192 {
5193  CmpLess comparator;
5194  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
5195  vector.begin(),
5196  vector.end(),
5197  value,
5198  comparator);
5199  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5200  {
5201  size_t indexToRemove = it - vector.begin();
5202  VmaVectorRemove(vector, indexToRemove);
5203  return true;
5204  }
5205  return false;
5206 }
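
// Illustrative sketch (not compiled; DeviceSizeLess and ExampleSortedVector
// are hypothetical) of keeping a vector sorted with these helpers; it assumes
// VmaBinaryFindFirstNotLess behaves like std::lower_bound, as defined earlier
// in this file.
#if 0
struct DeviceSizeLess
{
    bool operator()(VkDeviceSize lhs, VkDeviceSize rhs) const { return lhs < rhs; }
};

void ExampleSortedVector(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<VkDeviceSize> alloc(pCallbacks);
    VmaVector<VkDeviceSize, VmaStlAllocator<VkDeviceSize> > v(alloc);
    VmaVectorInsertSorted<DeviceSizeLess>(v, 64);
    VmaVectorInsertSorted<DeviceSizeLess>(v, 16);
    VmaVectorInsertSorted<DeviceSizeLess>(v, 32);
    // v now holds {16, 32, 64}.
    const bool removed = VmaVectorRemoveSorted<DeviceSizeLess>(v, 32); // true
    (void)removed;
}
#endif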
5207 
5208 ////////////////////////////////////////////////////////////////////////////////
5209 // class VmaSmallVector
5210 
5211 /*
5212 This is a vector (a variable-sized array), optimized for the case when the array is small.
5213 
5214 It contains some number of elements in-place, which allows it to avoid heap allocation
5215 when the actual number of elements is below that threshold. This allows normal "small"
5216 cases to be fast without losing generality for large inputs.
5217 */
5218 
5219 template<typename T, typename AllocatorT, size_t N>
5220 class VmaSmallVector
5221 {
5222 public:
5223  typedef T value_type;
5224 
5225  VmaSmallVector(const AllocatorT& allocator) :
5226  m_Count(0),
5227  m_DynamicArray(allocator)
5228  {
5229  }
5230  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5231  m_Count(count),
5232  m_DynamicArray(count > N ? count : 0, allocator)
5233  {
5234  }
5235  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5236  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5237  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5238  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5239 
5240  bool empty() const { return m_Count == 0; }
5241  size_t size() const { return m_Count; }
5242  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5243  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5244 
5245  T& operator[](size_t index)
5246  {
5247  VMA_HEAVY_ASSERT(index < m_Count);
5248  return data()[index];
5249  }
5250  const T& operator[](size_t index) const
5251  {
5252  VMA_HEAVY_ASSERT(index < m_Count);
5253  return data()[index];
5254  }
5255 
5256  T& front()
5257  {
5258  VMA_HEAVY_ASSERT(m_Count > 0);
5259  return data()[0];
5260  }
5261  const T& front() const
5262  {
5263  VMA_HEAVY_ASSERT(m_Count > 0);
5264  return data()[0];
5265  }
5266  T& back()
5267  {
5268  VMA_HEAVY_ASSERT(m_Count > 0);
5269  return data()[m_Count - 1];
5270  }
5271  const T& back() const
5272  {
5273  VMA_HEAVY_ASSERT(m_Count > 0);
5274  return data()[m_Count - 1];
5275  }
5276 
5277  void resize(size_t newCount, bool freeMemory = false)
5278  {
5279  if(newCount > N && m_Count > N)
5280  {
5281  // Any direction, staying in m_DynamicArray
5282  m_DynamicArray.resize(newCount, freeMemory);
5283  }
5284  else if(newCount > N && m_Count <= N)
5285  {
5286  // Growing, moving from m_StaticArray to m_DynamicArray
5287  m_DynamicArray.resize(newCount, freeMemory);
5288  if(m_Count > 0)
5289  {
5290  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5291  }
5292  }
5293  else if(newCount <= N && m_Count > N)
5294  {
5295  // Shrinking, moving from m_DynamicArray to m_StaticArray
5296  if(newCount > 0)
5297  {
5298  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5299  }
5300  m_DynamicArray.resize(0, freeMemory);
5301  }
5302  else
5303  {
5304  // Any direction, staying in m_StaticArray - nothing to do here
5305  }
5306  m_Count = newCount;
5307  }
5308 
5309  void clear(bool freeMemory = false)
5310  {
5311  m_DynamicArray.clear(freeMemory);
5312  m_Count = 0;
5313  }
5314 
5315  void insert(size_t index, const T& src)
5316  {
5317  VMA_HEAVY_ASSERT(index <= m_Count);
5318  const size_t oldCount = size();
5319  resize(oldCount + 1);
5320  T* const dataPtr = data();
5321  if(index < oldCount)
5322  {
5323  // Note: this could be optimized so that the memmove becomes a direct memcpy from m_StaticArray to m_DynamicArray.
5324  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5325  }
5326  dataPtr[index] = src;
5327  }
5328 
5329  void remove(size_t index)
5330  {
5331  VMA_HEAVY_ASSERT(index < m_Count);
5332  const size_t oldCount = size();
5333  if(index < oldCount - 1)
5334  {
5335  // Note: this could be optimized so that the memmove becomes a direct memcpy from m_DynamicArray to m_StaticArray.
5336  T* const dataPtr = data();
5337  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5338  }
5339  resize(oldCount - 1);
5340  }
5341 
5342  void push_back(const T& src)
5343  {
5344  const size_t newIndex = size();
5345  resize(newIndex + 1);
5346  data()[newIndex] = src;
5347  }
5348 
5349  void pop_back()
5350  {
5351  VMA_HEAVY_ASSERT(m_Count > 0);
5352  resize(size() - 1);
5353  }
5354 
5355  void push_front(const T& src)
5356  {
5357  insert(0, src);
5358  }
5359 
5360  void pop_front()
5361  {
5362  VMA_HEAVY_ASSERT(m_Count > 0);
5363  remove(0);
5364  }
5365 
5366  typedef T* iterator;
5367 
5368  iterator begin() { return data(); }
5369  iterator end() { return data() + m_Count; }
5370 
5371 private:
5372  size_t m_Count;
5373  T m_StaticArray[N]; // Used when m_Count <= N
5374  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
5375 };
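
// Illustrative sketch (not compiled; ExampleSmallVector is hypothetical):
// with N == 8, the first 8 elements live in the in-place m_StaticArray; the
// 9th push_back spills the contents into the heap-backed m_DynamicArray, and
// shrinking back below N copies them in-place again.
#if 0
void ExampleSmallVector(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
    VmaSmallVector<uint32_t, VmaStlAllocator<uint32_t>, 8> v(alloc);
    for(uint32_t i = 0; i < 8; ++i)
        v.push_back(i); // No heap allocation yet.
    v.push_back(8);     // Moves all elements into m_DynamicArray.
    v.resize(4);        // Copies the first 4 elements back into m_StaticArray.
}
#endif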
5376 
5377 ////////////////////////////////////////////////////////////////////////////////
5378 // class VmaPoolAllocator
5379 
5380 /*
5381 Allocator for objects of type T using a list of arrays (pools) to speed up
5382 allocation. Number of elements that can be allocated is not bounded because
5383 allocator can create multiple blocks.
5384 */
5385 template<typename T>
5386 class VmaPoolAllocator
5387 {
5388  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5389 public:
5390  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5391  ~VmaPoolAllocator();
5392  template<typename... Types> T* Alloc(Types&&... args);
5393  void Free(T* ptr);
5394 
5395 private:
5396  union Item
5397  {
5398  uint32_t NextFreeIndex;
5399  alignas(T) char Value[sizeof(T)];
5400  };
5401 
5402  struct ItemBlock
5403  {
5404  Item* pItems;
5405  uint32_t Capacity;
5406  uint32_t FirstFreeIndex;
5407  };
5408 
5409  const VkAllocationCallbacks* m_pAllocationCallbacks;
5410  const uint32_t m_FirstBlockCapacity;
5411  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5412 
5413  ItemBlock& CreateNewBlock();
5414 };
5415 
5416 template<typename T>
5417 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5418  m_pAllocationCallbacks(pAllocationCallbacks),
5419  m_FirstBlockCapacity(firstBlockCapacity),
5420  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5421 {
5422  VMA_ASSERT(m_FirstBlockCapacity > 1);
5423 }
5424 
5425 template<typename T>
5426 VmaPoolAllocator<T>::~VmaPoolAllocator()
5427 {
5428  for(size_t i = m_ItemBlocks.size(); i--; )
5429  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5430  m_ItemBlocks.clear();
5431 }
5432 
5433 template<typename T>
5434 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
5435 {
5436  for(size_t i = m_ItemBlocks.size(); i--; )
5437  {
5438  ItemBlock& block = m_ItemBlocks[i];
5439  // This block has some free items: use the first one.
5440  if(block.FirstFreeIndex != UINT32_MAX)
5441  {
5442  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5443  block.FirstFreeIndex = pItem->NextFreeIndex;
5444  T* result = (T*)&pItem->Value;
5445  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5446  return result;
5447  }
5448  }
5449 
5450  // No block has a free item: create a new one and use it.
5451  ItemBlock& newBlock = CreateNewBlock();
5452  Item* const pItem = &newBlock.pItems[0];
5453  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5454  T* result = (T*)&pItem->Value;
5455  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5456  return result;
5457 }
5458 
5459 template<typename T>
5460 void VmaPoolAllocator<T>::Free(T* ptr)
5461 {
5462  // Search all memory blocks to find ptr.
5463  for(size_t i = m_ItemBlocks.size(); i--; )
5464  {
5465  ItemBlock& block = m_ItemBlocks[i];
5466 
5467  // Cast ptr to a pointer to the containing Item union; done through memcpy to avoid type-punning problems.
5468  Item* pItemPtr;
5469  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5470 
5471  // Check if pItemPtr is in address range of this block.
5472  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5473  {
5474  ptr->~T(); // Explicit destructor call.
5475  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5476  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5477  block.FirstFreeIndex = index;
5478  return;
5479  }
5480  }
5481  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5482 }
5483 
5484 template<typename T>
5485 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5486 {
5487  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5488  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5489 
5490  const ItemBlock newBlock = {
5491  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5492  newBlockCapacity,
5493  0 };
5494 
5495  m_ItemBlocks.push_back(newBlock);
5496 
5497  // Setup singly-linked list of all free items in this block.
5498  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5499  newBlock.pItems[i].NextFreeIndex = i + 1;
5500  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5501  return m_ItemBlocks.back();
5502 }
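
// Illustrative sketch (not compiled; ExampleItem and ExamplePoolAllocator are
// hypothetical): objects are placement-new'ed into slots of pooled blocks.
// Free returns a slot to the head of its block's free list, so a subsequent
// Alloc reuses it (LIFO order).
#if 0
struct ExampleItem
{
    uint32_t id;
    ExampleItem(uint32_t i) : id(i) { }
};

void ExamplePoolAllocator(const VkAllocationCallbacks* pCallbacks)
{
    // First block holds 32 items; each following block is 1.5x larger.
    VmaPoolAllocator<ExampleItem> pool(pCallbacks, 32);
    ExampleItem* const a = pool.Alloc(1u);
    ExampleItem* const b = pool.Alloc(2u);
    pool.Free(a);                          // Destroys *a; slot becomes free.
    ExampleItem* const c = pool.Alloc(3u); // Reuses the slot that held *a.
    pool.Free(b);
    pool.Free(c);
}
#endif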
5503 
5504 ////////////////////////////////////////////////////////////////////////////////
5505 // class VmaRawList, VmaList
5506 
5507 #if VMA_USE_STL_LIST
5508 
5509 #define VmaList std::list
5510 
5511 #else // #if VMA_USE_STL_LIST
5512 
5513 template<typename T>
5514 struct VmaListItem
5515 {
5516  VmaListItem* pPrev;
5517  VmaListItem* pNext;
5518  T Value;
5519 };
5520 
5521 // Doubly linked list.
5522 template<typename T>
5523 class VmaRawList
5524 {
5525  VMA_CLASS_NO_COPY(VmaRawList)
5526 public:
5527  typedef VmaListItem<T> ItemType;
5528 
5529  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5530  ~VmaRawList();
5531  void Clear();
5532 
5533  size_t GetCount() const { return m_Count; }
5534  bool IsEmpty() const { return m_Count == 0; }
5535 
5536  ItemType* Front() { return m_pFront; }
5537  const ItemType* Front() const { return m_pFront; }
5538  ItemType* Back() { return m_pBack; }
5539  const ItemType* Back() const { return m_pBack; }
5540 
5541  ItemType* PushBack();
5542  ItemType* PushFront();
5543  ItemType* PushBack(const T& value);
5544  ItemType* PushFront(const T& value);
5545  void PopBack();
5546  void PopFront();
5547 
5548  // Item can be null - it means PushBack.
5549  ItemType* InsertBefore(ItemType* pItem);
5550  // Item can be null - it means PushFront.
5551  ItemType* InsertAfter(ItemType* pItem);
5552 
5553  ItemType* InsertBefore(ItemType* pItem, const T& value);
5554  ItemType* InsertAfter(ItemType* pItem, const T& value);
5555 
5556  void Remove(ItemType* pItem);
5557 
5558 private:
5559  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5560  VmaPoolAllocator<ItemType> m_ItemAllocator;
5561  ItemType* m_pFront;
5562  ItemType* m_pBack;
5563  size_t m_Count;
5564 };
5565 
5566 template<typename T>
5567 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5568  m_pAllocationCallbacks(pAllocationCallbacks),
5569  m_ItemAllocator(pAllocationCallbacks, 128),
5570  m_pFront(VMA_NULL),
5571  m_pBack(VMA_NULL),
5572  m_Count(0)
5573 {
5574 }
5575 
5576 template<typename T>
5577 VmaRawList<T>::~VmaRawList()
5578 {
5579  // Intentionally not calling Clear, because that would needlessly walk the
5580  // whole list just to return every item to m_ItemAllocator as free.
5581 }
5582 
5583 template<typename T>
5584 void VmaRawList<T>::Clear()
5585 {
5586  if(IsEmpty() == false)
5587  {
5588  ItemType* pItem = m_pBack;
5589  while(pItem != VMA_NULL)
5590  {
5591  ItemType* const pPrevItem = pItem->pPrev;
5592  m_ItemAllocator.Free(pItem);
5593  pItem = pPrevItem;
5594  }
5595  m_pFront = VMA_NULL;
5596  m_pBack = VMA_NULL;
5597  m_Count = 0;
5598  }
5599 }
5600 
5601 template<typename T>
5602 VmaListItem<T>* VmaRawList<T>::PushBack()
5603 {
5604  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5605  pNewItem->pNext = VMA_NULL;
5606  if(IsEmpty())
5607  {
5608  pNewItem->pPrev = VMA_NULL;
5609  m_pFront = pNewItem;
5610  m_pBack = pNewItem;
5611  m_Count = 1;
5612  }
5613  else
5614  {
5615  pNewItem->pPrev = m_pBack;
5616  m_pBack->pNext = pNewItem;
5617  m_pBack = pNewItem;
5618  ++m_Count;
5619  }
5620  return pNewItem;
5621 }
5622 
5623 template<typename T>
5624 VmaListItem<T>* VmaRawList<T>::PushFront()
5625 {
5626  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5627  pNewItem->pPrev = VMA_NULL;
5628  if(IsEmpty())
5629  {
5630  pNewItem->pNext = VMA_NULL;
5631  m_pFront = pNewItem;
5632  m_pBack = pNewItem;
5633  m_Count = 1;
5634  }
5635  else
5636  {
5637  pNewItem->pNext = m_pFront;
5638  m_pFront->pPrev = pNewItem;
5639  m_pFront = pNewItem;
5640  ++m_Count;
5641  }
5642  return pNewItem;
5643 }
5644 
5645 template<typename T>
5646 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5647 {
5648  ItemType* const pNewItem = PushBack();
5649  pNewItem->Value = value;
5650  return pNewItem;
5651 }
5652 
5653 template<typename T>
5654 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5655 {
5656  ItemType* const pNewItem = PushFront();
5657  pNewItem->Value = value;
5658  return pNewItem;
5659 }
5660 
5661 template<typename T>
5662 void VmaRawList<T>::PopBack()
5663 {
5664  VMA_HEAVY_ASSERT(m_Count > 0);
5665  ItemType* const pBackItem = m_pBack;
5666  ItemType* const pPrevItem = pBackItem->pPrev;
5667  if(pPrevItem != VMA_NULL)
5668  {
5669  pPrevItem->pNext = VMA_NULL;
5670  }
5671  m_pBack = pPrevItem;
5672  m_ItemAllocator.Free(pBackItem);
5673  --m_Count;
5674 }
5675 
5676 template<typename T>
5677 void VmaRawList<T>::PopFront()
5678 {
5679  VMA_HEAVY_ASSERT(m_Count > 0);
5680  ItemType* const pFrontItem = m_pFront;
5681  ItemType* const pNextItem = pFrontItem->pNext;
5682  if(pNextItem != VMA_NULL)
5683  {
5684  pNextItem->pPrev = VMA_NULL;
5685  }
5686  m_pFront = pNextItem;
5687  m_ItemAllocator.Free(pFrontItem);
5688  --m_Count;
5689 }
5690 
5691 template<typename T>
5692 void VmaRawList<T>::Remove(ItemType* pItem)
5693 {
5694  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5695  VMA_HEAVY_ASSERT(m_Count > 0);
5696 
5697  if(pItem->pPrev != VMA_NULL)
5698  {
5699  pItem->pPrev->pNext = pItem->pNext;
5700  }
5701  else
5702  {
5703  VMA_HEAVY_ASSERT(m_pFront == pItem);
5704  m_pFront = pItem->pNext;
5705  }
5706 
5707  if(pItem->pNext != VMA_NULL)
5708  {
5709  pItem->pNext->pPrev = pItem->pPrev;
5710  }
5711  else
5712  {
5713  VMA_HEAVY_ASSERT(m_pBack == pItem);
5714  m_pBack = pItem->pPrev;
5715  }
5716 
5717  m_ItemAllocator.Free(pItem);
5718  --m_Count;
5719 }
5720 
5721 template<typename T>
5722 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5723 {
5724  if(pItem != VMA_NULL)
5725  {
5726  ItemType* const prevItem = pItem->pPrev;
5727  ItemType* const newItem = m_ItemAllocator.Alloc();
5728  newItem->pPrev = prevItem;
5729  newItem->pNext = pItem;
5730  pItem->pPrev = newItem;
5731  if(prevItem != VMA_NULL)
5732  {
5733  prevItem->pNext = newItem;
5734  }
5735  else
5736  {
5737  VMA_HEAVY_ASSERT(m_pFront == pItem);
5738  m_pFront = newItem;
5739  }
5740  ++m_Count;
5741  return newItem;
5742  }
5743  else
5744  return PushBack();
5745 }
5746 
5747 template<typename T>
5748 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5749 {
5750  if(pItem != VMA_NULL)
5751  {
5752  ItemType* const nextItem = pItem->pNext;
5753  ItemType* const newItem = m_ItemAllocator.Alloc();
5754  newItem->pNext = nextItem;
5755  newItem->pPrev = pItem;
5756  pItem->pNext = newItem;
5757  if(nextItem != VMA_NULL)
5758  {
5759  nextItem->pPrev = newItem;
5760  }
5761  else
5762  {
5763  VMA_HEAVY_ASSERT(m_pBack == pItem);
5764  m_pBack = newItem;
5765  }
5766  ++m_Count;
5767  return newItem;
5768  }
5769  else
5770  return PushFront();
5771 }
5772 
5773 template<typename T>
5774 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5775 {
5776  ItemType* const newItem = InsertBefore(pItem);
5777  newItem->Value = value;
5778  return newItem;
5779 }
5780 
5781 template<typename T>
5782 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5783 {
5784  ItemType* const newItem = InsertAfter(pItem);
5785  newItem->Value = value;
5786  return newItem;
5787 }
5788 
5789 template<typename T, typename AllocatorT>
5790 class VmaList
5791 {
5792  VMA_CLASS_NO_COPY(VmaList)
5793 public:
5794  class iterator
5795  {
5796  public:
5797  iterator() :
5798  m_pList(VMA_NULL),
5799  m_pItem(VMA_NULL)
5800  {
5801  }
5802 
5803  T& operator*() const
5804  {
5805  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5806  return m_pItem->Value;
5807  }
5808  T* operator->() const
5809  {
5810  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5811  return &m_pItem->Value;
5812  }
5813 
5814  iterator& operator++()
5815  {
5816  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5817  m_pItem = m_pItem->pNext;
5818  return *this;
5819  }
5820  iterator& operator--()
5821  {
5822  if(m_pItem != VMA_NULL)
5823  {
5824  m_pItem = m_pItem->pPrev;
5825  }
5826  else
5827  {
5828  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5829  m_pItem = m_pList->Back();
5830  }
5831  return *this;
5832  }
5833 
5834  iterator operator++(int)
5835  {
5836  iterator result = *this;
5837  ++*this;
5838  return result;
5839  }
5840  iterator operator--(int)
5841  {
5842  iterator result = *this;
5843  --*this;
5844  return result;
5845  }
5846 
5847  bool operator==(const iterator& rhs) const
5848  {
5849  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5850  return m_pItem == rhs.m_pItem;
5851  }
5852  bool operator!=(const iterator& rhs) const
5853  {
5854  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5855  return m_pItem != rhs.m_pItem;
5856  }
5857 
5858  private:
5859  VmaRawList<T>* m_pList;
5860  VmaListItem<T>* m_pItem;
5861 
5862  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5863  m_pList(pList),
5864  m_pItem(pItem)
5865  {
5866  }
5867 
5868  friend class VmaList<T, AllocatorT>;
5869  };
5870 
5871  class const_iterator
5872  {
5873  public:
5874  const_iterator() :
5875  m_pList(VMA_NULL),
5876  m_pItem(VMA_NULL)
5877  {
5878  }
5879 
5880  const_iterator(const iterator& src) :
5881  m_pList(src.m_pList),
5882  m_pItem(src.m_pItem)
5883  {
5884  }
5885 
5886  const T& operator*() const
5887  {
5888  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5889  return m_pItem->Value;
5890  }
5891  const T* operator->() const
5892  {
5893  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5894  return &m_pItem->Value;
5895  }
5896 
5897  const_iterator& operator++()
5898  {
5899  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5900  m_pItem = m_pItem->pNext;
5901  return *this;
5902  }
5903  const_iterator& operator--()
5904  {
5905  if(m_pItem != VMA_NULL)
5906  {
5907  m_pItem = m_pItem->pPrev;
5908  }
5909  else
5910  {
5911  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5912  m_pItem = m_pList->Back();
5913  }
5914  return *this;
5915  }
5916 
5917  const_iterator operator++(int)
5918  {
5919  const_iterator result = *this;
5920  ++*this;
5921  return result;
5922  }
5923  const_iterator operator--(int)
5924  {
5925  const_iterator result = *this;
5926  --*this;
5927  return result;
5928  }
5929 
5930  bool operator==(const const_iterator& rhs) const
5931  {
5932  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5933  return m_pItem == rhs.m_pItem;
5934  }
5935  bool operator!=(const const_iterator& rhs) const
5936  {
5937  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5938  return m_pItem != rhs.m_pItem;
5939  }
5940 
5941  private:
5942  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5943  m_pList(pList),
5944  m_pItem(pItem)
5945  {
5946  }
5947 
5948  const VmaRawList<T>* m_pList;
5949  const VmaListItem<T>* m_pItem;
5950 
5951  friend class VmaList<T, AllocatorT>;
5952  };
5953 
5954  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5955 
5956  bool empty() const { return m_RawList.IsEmpty(); }
5957  size_t size() const { return m_RawList.GetCount(); }
5958 
5959  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5960  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5961 
5962  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5963  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5964 
5965  void clear() { m_RawList.Clear(); }
5966  void push_back(const T& value) { m_RawList.PushBack(value); }
5967  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5968  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5969 
5970 private:
5971  VmaRawList<T> m_RawList;
5972 };
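
// Illustrative sketch (not compiled; ExampleList is hypothetical): VmaList
// mimics the relevant subset of std::list. insert() inserts before the given
// iterator, and erase() removes the pointed-to item without invalidating
// iterators to other items.
#if 0
void ExampleList(const VkAllocationCallbacks* pCallbacks)
{
    const VmaStlAllocator<uint32_t> alloc(pCallbacks);
    VmaList<uint32_t, VmaStlAllocator<uint32_t> > list(alloc);
    list.push_back(10);
    list.push_back(30);
    VmaList<uint32_t, VmaStlAllocator<uint32_t> >::iterator it = list.begin();
    ++it;                // Points at 30.
    list.insert(it, 20); // List is now 10, 20, 30.
    list.erase(it);      // Removes 30; list is 10, 20.
}
#endif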
5973 
5974 #endif // #if VMA_USE_STL_LIST
5975 
5976 ////////////////////////////////////////////////////////////////////////////////
5977 // class VmaMap
5978 
5979 // Unused in this version.
5980 #if 0
5981 
5982 #if VMA_USE_STL_UNORDERED_MAP
5983 
5984 #define VmaPair std::pair
5985 
5986 #define VMA_MAP_TYPE(KeyT, ValueT) \
5987  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5988 
5989 #else // #if VMA_USE_STL_UNORDERED_MAP
5990 
5991 template<typename T1, typename T2>
5992 struct VmaPair
5993 {
5994  T1 first;
5995  T2 second;
5996 
5997  VmaPair() : first(), second() { }
5998  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5999 };
6000 
6001 /* Class compatible with subset of interface of std::unordered_map.
6002 KeyT, ValueT must be POD because they will be stored in VmaVector.
6003 */
6004 template<typename KeyT, typename ValueT>
6005 class VmaMap
6006 {
6007 public:
6008  typedef VmaPair<KeyT, ValueT> PairType;
6009  typedef PairType* iterator;
6010 
6011  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
6012 
6013  iterator begin() { return m_Vector.begin(); }
6014  iterator end() { return m_Vector.end(); }
6015 
6016  void insert(const PairType& pair);
6017  iterator find(const KeyT& key);
6018  void erase(iterator it);
6019 
6020 private:
6021  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
6022 };
6023 
6024 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
6025 
6026 template<typename FirstT, typename SecondT>
6027 struct VmaPairFirstLess
6028 {
6029  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
6030  {
6031  return lhs.first < rhs.first;
6032  }
6033  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
6034  {
6035  return lhs.first < rhsFirst;
6036  }
6037 };
6038 
6039 template<typename KeyT, typename ValueT>
6040 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
6041 {
6042  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
6043  m_Vector.data(),
6044  m_Vector.data() + m_Vector.size(),
6045  pair,
6046  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
6047  VmaVectorInsert(m_Vector, indexToInsert, pair);
6048 }
6049 
6050 template<typename KeyT, typename ValueT>
6051 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
6052 {
6053  PairType* it = VmaBinaryFindFirstNotLess(
6054  m_Vector.data(),
6055  m_Vector.data() + m_Vector.size(),
6056  key,
6057  VmaPairFirstLess<KeyT, ValueT>());
6058  if((it != m_Vector.end()) && (it->first == key))
6059  {
6060  return it;
6061  }
6062  else
6063  {
6064  return m_Vector.end();
6065  }
6066 }
6067 
6068 template<typename KeyT, typename ValueT>
6069 void VmaMap<KeyT, ValueT>::erase(iterator it)
6070 {
6071  VmaVectorRemove(m_Vector, it - m_Vector.begin());
6072 }
6073 
6074 #endif // #if VMA_USE_STL_UNORDERED_MAP
6075 
6076 #endif // #if 0
6077 
6078 ////////////////////////////////////////////////////////////////////////////////
6079 
6080 class VmaDeviceMemoryBlock;
6081 
6082 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
6083 
6084 struct VmaAllocation_T
6085 {
6086 private:
6087  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
6088 
6089  enum FLAGS
6090  {
6091  FLAG_USER_DATA_STRING = 0x01,
6092  };
6093 
6094 public:
6095  enum ALLOCATION_TYPE
6096  {
6097  ALLOCATION_TYPE_NONE,
6098  ALLOCATION_TYPE_BLOCK,
6099  ALLOCATION_TYPE_DEDICATED,
6100  };
6101 
6102  /*
6103  This struct is allocated using VmaPoolAllocator.
6104  */
6105 
6106  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
6107  m_Alignment{1},
6108  m_Size{0},
6109  m_pUserData{VMA_NULL},
6110  m_LastUseFrameIndex{currentFrameIndex},
6111  m_MemoryTypeIndex{0},
6112  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
6113  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
6114  m_MapCount{0},
6115  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
6116  {
6117 #if VMA_STATS_STRING_ENABLED
6118  m_CreationFrameIndex = currentFrameIndex;
6119  m_BufferImageUsage = 0;
6120 #endif
6121  }
6122 
6123  ~VmaAllocation_T()
6124  {
6125  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
6126 
6127  // Check if owned string was freed.
6128  VMA_ASSERT(m_pUserData == VMA_NULL);
6129  }
6130 
6131  void InitBlockAllocation(
6132  VmaDeviceMemoryBlock* block,
6133  VkDeviceSize offset,
6134  VkDeviceSize alignment,
6135  VkDeviceSize size,
6136  uint32_t memoryTypeIndex,
6137  VmaSuballocationType suballocationType,
6138  bool mapped,
6139  bool canBecomeLost)
6140  {
6141  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6142  VMA_ASSERT(block != VMA_NULL);
6143  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6144  m_Alignment = alignment;
6145  m_Size = size;
6146  m_MemoryTypeIndex = memoryTypeIndex;
6147  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6148  m_SuballocationType = (uint8_t)suballocationType;
6149  m_BlockAllocation.m_Block = block;
6150  m_BlockAllocation.m_Offset = offset;
6151  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
6152  }
6153 
6154  void InitLost()
6155  {
6156  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6157  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
6158  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6159  m_MemoryTypeIndex = 0;
6160  m_BlockAllocation.m_Block = VMA_NULL;
6161  m_BlockAllocation.m_Offset = 0;
6162  m_BlockAllocation.m_CanBecomeLost = true;
6163  }
6164 
6165  void ChangeBlockAllocation(
6166  VmaAllocator hAllocator,
6167  VmaDeviceMemoryBlock* block,
6168  VkDeviceSize offset);
6169 
6170  void ChangeOffset(VkDeviceSize newOffset);
6171 
6172  // A non-null pMappedData means the allocation was created with the MAPPED flag.
6173  void InitDedicatedAllocation(
6174  uint32_t memoryTypeIndex,
6175  VkDeviceMemory hMemory,
6176  VmaSuballocationType suballocationType,
6177  void* pMappedData,
6178  VkDeviceSize size)
6179  {
6180  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6181  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
6182  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
6183  m_Alignment = 0;
6184  m_Size = size;
6185  m_MemoryTypeIndex = memoryTypeIndex;
6186  m_SuballocationType = (uint8_t)suballocationType;
6187  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6188  m_DedicatedAllocation.m_hMemory = hMemory;
6189  m_DedicatedAllocation.m_pMappedData = pMappedData;
6190  }
6191 
6192  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
6193  VkDeviceSize GetAlignment() const { return m_Alignment; }
6194  VkDeviceSize GetSize() const { return m_Size; }
6195  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
6196  void* GetUserData() const { return m_pUserData; }
6197  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6198  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6199 
6200  VmaDeviceMemoryBlock* GetBlock() const
6201  {
6202  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6203  return m_BlockAllocation.m_Block;
6204  }
6205  VkDeviceSize GetOffset() const;
6206  VkDeviceMemory GetMemory() const;
6207  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6208  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6209  void* GetMappedData() const;
6210  bool CanBecomeLost() const;
6211 
6212  uint32_t GetLastUseFrameIndex() const
6213  {
6214  return m_LastUseFrameIndex.load();
6215  }
6216  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6217  {
6218  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6219  }
6220  /*
6221  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6222  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6223  - Else, returns false.
6224 
6225  If hAllocation is already lost, assert - you should not call it then.
6226  If hAllocation was not created with CAN_BECOME_LOST_BIT, assert.
6227  */
6228  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6229 
6230  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6231  {
6232  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6233  outInfo.blockCount = 1;
6234  outInfo.allocationCount = 1;
6235  outInfo.unusedRangeCount = 0;
6236  outInfo.usedBytes = m_Size;
6237  outInfo.unusedBytes = 0;
6238  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6239  outInfo.unusedRangeSizeMin = UINT64_MAX;
6240  outInfo.unusedRangeSizeMax = 0;
6241  }
6242 
6243  void BlockAllocMap();
6244  void BlockAllocUnmap();
6245  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6246  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6247 
6248 #if VMA_STATS_STRING_ENABLED
6249  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6250  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6251 
6252  void InitBufferImageUsage(uint32_t bufferImageUsage)
6253  {
6254  VMA_ASSERT(m_BufferImageUsage == 0);
6255  m_BufferImageUsage = bufferImageUsage;
6256  }
6257 
6258  void PrintParameters(class VmaJsonWriter& json) const;
6259 #endif
6260 
6261 private:
6262  VkDeviceSize m_Alignment;
6263  VkDeviceSize m_Size;
6264  void* m_pUserData;
6265  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6266  uint32_t m_MemoryTypeIndex;
6267  uint8_t m_Type; // ALLOCATION_TYPE
6268  uint8_t m_SuballocationType; // VmaSuballocationType
6269  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6270  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
6271  uint8_t m_MapCount;
6272  uint8_t m_Flags; // enum FLAGS
6273 
6274  // Allocation out of VmaDeviceMemoryBlock.
6275  struct BlockAllocation
6276  {
6277  VmaDeviceMemoryBlock* m_Block;
6278  VkDeviceSize m_Offset;
6279  bool m_CanBecomeLost;
6280  };
6281 
6282  // Allocation for an object that has its own private VkDeviceMemory.
6283  struct DedicatedAllocation
6284  {
6285  VkDeviceMemory m_hMemory;
6286  void* m_pMappedData; // Not null means memory is mapped.
6287  };
6288 
6289  union
6290  {
6291  // Allocation out of VmaDeviceMemoryBlock.
6292  BlockAllocation m_BlockAllocation;
6293  // Allocation for an object that has its own private VkDeviceMemory.
6294  DedicatedAllocation m_DedicatedAllocation;
6295  };
6296 
6297 #if VMA_STATS_STRING_ENABLED
6298  uint32_t m_CreationFrameIndex;
6299  uint32_t m_BufferImageUsage; // 0 if unknown.
6300 #endif
6301 
6302  void FreeUserDataString(VmaAllocator hAllocator);
6303 };
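
// Worked example of the m_MapCount encoding described above (a sketch based on
// the comments on m_MapCount; exact transitions happen in the Map/Unmap code):
// - Created with VMA_ALLOCATION_CREATE_MAPPED_BIT: m_MapCount == 0x80.
// - After one vmaMapMemory():                      m_MapCount == 0x81.
// - After a second vmaMapMemory():                 m_MapCount == 0x82.
// IsPersistentMap() tests bit 0x80, and ~VmaAllocation_T() asserts that
// (m_MapCount & ~0x80) == 0, i.e. every user map call was matched by an unmap.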
6304 
6305 /*
6306 Represents a region of a VmaDeviceMemoryBlock that is either assigned to a
6307 #VmaAllocation and returned as allocated memory, or free.
6308 */
6309 struct VmaSuballocation
6310 {
6311  VkDeviceSize offset;
6312  VkDeviceSize size;
6313  VmaAllocation hAllocation;
6314  VmaSuballocationType type;
6315 };
6316 
6317 // Comparators ordering suballocations by offset.
6318 struct VmaSuballocationOffsetLess
6319 {
6320  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6321  {
6322  return lhs.offset < rhs.offset;
6323  }
6324 };
6325 struct VmaSuballocationOffsetGreater
6326 {
6327  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6328  {
6329  return lhs.offset > rhs.offset;
6330  }
6331 };
6332 
6333 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6334 
6335 // Cost of making one additional allocation lost, expressed as an equivalent number of bytes.
6336 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6337 
6338 enum class VmaAllocationRequestType
6339 {
6340  Normal,
6341  // Used by "Linear" algorithm.
6342  UpperAddress,
6343  EndOf1st,
6344  EndOf2nd,
6345 };
6346 
6347 /*
6348 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6349 
6350 If canMakeOtherLost was false:
6351 - item points to a FREE suballocation.
6352 - itemsToMakeLostCount is 0.
6353 
6354 If canMakeOtherLost was true:
6355 - item points to first of sequence of suballocations, which are either FREE,
6356  or point to VmaAllocations that can become lost.
6357 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6358  the requested allocation to succeed.
6359 */
6360 struct VmaAllocationRequest
6361 {
6362  VkDeviceSize offset;
6363  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6364  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6365  VmaSuballocationList::iterator item;
6366  size_t itemsToMakeLostCount;
6367  void* customData;
6368  VmaAllocationRequestType type;
6369 
6370  VkDeviceSize CalcCost() const
6371  {
6372  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6373  }
6374 };
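
// Worked example of CalcCost() (a sketch): suppose the proposed allocation
// overlaps 3 MiB worth of allocations that would have to be made lost, spread
// over 2 separate VmaAllocations. Then:
//   sumItemSize          == 3 * 1048576
//   itemsToMakeLostCount == 2
//   CalcCost()           == 3 MiB + 2 * VMA_LOST_ALLOCATION_COST == 5 MiB.
// When candidate requests are compared, the one with the lower cost is
// presumably preferred, so sacrificing fewer and smaller allocations wins.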
6375 
6376 /*
6377 Data structure used for bookkeeping of allocations and unused ranges of memory
6378 in a single VkDeviceMemory block.
6379 */
6380 class VmaBlockMetadata
6381 {
6382 public:
6383  VmaBlockMetadata(VmaAllocator hAllocator);
6384  virtual ~VmaBlockMetadata() { }
6385  virtual void Init(VkDeviceSize size) { m_Size = size; }
6386 
6387  // Validates all data structures inside this object. If not valid, returns false.
6388  virtual bool Validate() const = 0;
6389  VkDeviceSize GetSize() const { return m_Size; }
6390  virtual size_t GetAllocationCount() const = 0;
6391  virtual VkDeviceSize GetSumFreeSize() const = 0;
6392  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6393  // Returns true if this block is empty, i.e. contains only a single free suballocation.
6394  virtual bool IsEmpty() const = 0;
6395 
6396  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6397  // Shouldn't modify blockCount.
6398  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6399 
6400 #if VMA_STATS_STRING_ENABLED
6401  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6402 #endif
6403 
6404  // Tries to find a place for suballocation with given parameters inside this block.
6405  // If succeeded, fills pAllocationRequest and returns true.
6406  // If failed, returns false.
6407  virtual bool CreateAllocationRequest(
6408  uint32_t currentFrameIndex,
6409  uint32_t frameInUseCount,
6410  VkDeviceSize bufferImageGranularity,
6411  VkDeviceSize allocSize,
6412  VkDeviceSize allocAlignment,
6413  bool upperAddress,
6414  VmaSuballocationType allocType,
6415  bool canMakeOtherLost,
6416  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6417  uint32_t strategy,
6418  VmaAllocationRequest* pAllocationRequest) = 0;
6419 
6420  virtual bool MakeRequestedAllocationsLost(
6421  uint32_t currentFrameIndex,
6422  uint32_t frameInUseCount,
6423  VmaAllocationRequest* pAllocationRequest) = 0;
6424 
6425  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6426 
6427  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6428 
6429  // Makes actual allocation based on request. Request must already be checked and valid.
6430  virtual void Alloc(
6431  const VmaAllocationRequest& request,
6432  VmaSuballocationType type,
6433  VkDeviceSize allocSize,
6434  VmaAllocation hAllocation) = 0;
6435 
6436  // Frees suballocation assigned to given memory region.
6437  virtual void Free(const VmaAllocation allocation) = 0;
6438  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6439 
6440 protected:
6441  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6442 
6443 #if VMA_STATS_STRING_ENABLED
6444  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6445  VkDeviceSize unusedBytes,
6446  size_t allocationCount,
6447  size_t unusedRangeCount) const;
6448  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6449  VkDeviceSize offset,
6450  VmaAllocation hAllocation) const;
6451  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6452  VkDeviceSize offset,
6453  VkDeviceSize size) const;
6454  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6455 #endif
6456 
6457 private:
6458  VkDeviceSize m_Size;
6459  const VkAllocationCallbacks* m_pAllocationCallbacks;
6460 };
6461 
6462 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6463  VMA_ASSERT(0 && "Validation failed: " #cond); \
6464  return false; \
6465  } } while(false)
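
// Usage sketch (not compiled; ExampleValidate is hypothetical): inside a
// Validate() implementation, each VMA_VALIDATE(cond) asserts and returns false
// on failure, so a validation routine reads as a flat list of invariants.
#if 0
bool ExampleValidate(size_t count, size_t capacity)
{
    VMA_VALIDATE(count <= capacity);
    VMA_VALIDATE(capacity != 0 || count == 0);
    return true;
}
#endif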
6466 
6467 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6468 {
6469  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6470 public:
6471  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6472  virtual ~VmaBlockMetadata_Generic();
6473  virtual void Init(VkDeviceSize size);
6474 
6475  virtual bool Validate() const;
6476  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6477  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6478  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6479  virtual bool IsEmpty() const;
6480 
6481  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6482  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6483 
6484 #if VMA_STATS_STRING_ENABLED
6485  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6486 #endif
6487 
6488  virtual bool CreateAllocationRequest(
6489  uint32_t currentFrameIndex,
6490  uint32_t frameInUseCount,
6491  VkDeviceSize bufferImageGranularity,
6492  VkDeviceSize allocSize,
6493  VkDeviceSize allocAlignment,
6494  bool upperAddress,
6495  VmaSuballocationType allocType,
6496  bool canMakeOtherLost,
6497  uint32_t strategy,
6498  VmaAllocationRequest* pAllocationRequest);
6499 
6500  virtual bool MakeRequestedAllocationsLost(
6501  uint32_t currentFrameIndex,
6502  uint32_t frameInUseCount,
6503  VmaAllocationRequest* pAllocationRequest);
6504 
6505  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6506 
6507  virtual VkResult CheckCorruption(const void* pBlockData);
6508 
6509  virtual void Alloc(
6510  const VmaAllocationRequest& request,
6511  VmaSuballocationType type,
6512  VkDeviceSize allocSize,
6513  VmaAllocation hAllocation);
6514 
6515  virtual void Free(const VmaAllocation allocation);
6516  virtual void FreeAtOffset(VkDeviceSize offset);
6517 
6518  ///////////////////////////////////////////////////////////////////////////
6519  // For defragmentation
6520 
6521  bool IsBufferImageGranularityConflictPossible(
6522  VkDeviceSize bufferImageGranularity,
6523  VmaSuballocationType& inOutPrevSuballocType) const;
6524 
6525 private:
6526  friend class VmaDefragmentationAlgorithm_Generic;
6527  friend class VmaDefragmentationAlgorithm_Fast;
6528 
6529  uint32_t m_FreeCount;
6530  VkDeviceSize m_SumFreeSize;
6531  VmaSuballocationList m_Suballocations;
6532  // Suballocations that are free and have size greater than certain threshold.
6533  // Sorted by size, ascending.
6534  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6535 
6536  bool ValidateFreeSuballocationList() const;
6537 
6538  // Checks if the requested suballocation with given parameters can be placed in given suballocItem.
6539  // If yes, fills pOffset and returns true. If no, returns false.
6540  bool CheckAllocation(
6541  uint32_t currentFrameIndex,
6542  uint32_t frameInUseCount,
6543  VkDeviceSize bufferImageGranularity,
6544  VkDeviceSize allocSize,
6545  VkDeviceSize allocAlignment,
6546  VmaSuballocationType allocType,
6547  VmaSuballocationList::const_iterator suballocItem,
6548  bool canMakeOtherLost,
6549  VkDeviceSize* pOffset,
6550  size_t* itemsToMakeLostCount,
6551  VkDeviceSize* pSumFreeSize,
6552  VkDeviceSize* pSumItemSize) const;
6553  // Given a free suballocation, merges it with the following one, which must also be free.
6554  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6555  // Releases given suballocation, making it free.
6556  // Merges it with adjacent free suballocations if applicable.
6557  // Returns iterator to new free suballocation at this place.
6558  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6559  // Given a free suballocation, inserts it into the sorted list
6560  // m_FreeSuballocationsBySize if it is suitable.
6561  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6562  // Given a free suballocation, removes it from the sorted list
6563  // m_FreeSuballocationsBySize if it is suitable.
6564  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6565 };
6566 
6567 /*
6568 Allocations and their references in internal data structure look like this:
6569 
6570 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6571 
6572  0 +-------+
6573  | |
6574  | |
6575  | |
6576  +-------+
6577  | Alloc | 1st[m_1stNullItemsBeginCount]
6578  +-------+
6579  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6580  +-------+
6581  | ... |
6582  +-------+
6583  | Alloc | 1st[1st.size() - 1]
6584  +-------+
6585  | |
6586  | |
6587  | |
6588 GetSize() +-------+
6589 
6590 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6591 
6592  0 +-------+
6593  | Alloc | 2nd[0]
6594  +-------+
6595  | Alloc | 2nd[1]
6596  +-------+
6597  | ... |
6598  +-------+
6599  | Alloc | 2nd[2nd.size() - 1]
6600  +-------+
6601  | |
6602  | |
6603  | |
6604  +-------+
6605  | Alloc | 1st[m_1stNullItemsBeginCount]
6606  +-------+
6607  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6608  +-------+
6609  | ... |
6610  +-------+
6611  | Alloc | 1st[1st.size() - 1]
6612  +-------+
6613  | |
6614 GetSize() +-------+
6615 
6616 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6617 
6618  0 +-------+
6619  | |
6620  | |
6621  | |
6622  +-------+
6623  | Alloc | 1st[m_1stNullItemsBeginCount]
6624  +-------+
6625  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6626  +-------+
6627  | ... |
6628  +-------+
6629  | Alloc | 1st[1st.size() - 1]
6630  +-------+
6631  | |
6632  | |
6633  | |
6634  +-------+
6635  | Alloc | 2nd[2nd.size() - 1]
6636  +-------+
6637  | ... |
6638  +-------+
6639  | Alloc | 2nd[1]
6640  +-------+
6641  | Alloc | 2nd[0]
6642 GetSize() +-------+
6643 
6644 */
6645 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6646 {
6647  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6648 public:
6649  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6650  virtual ~VmaBlockMetadata_Linear();
6651  virtual void Init(VkDeviceSize size);
6652 
6653  virtual bool Validate() const;
6654  virtual size_t GetAllocationCount() const;
6655  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6656  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6657  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6658 
6659  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6660  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6661 
6662 #if VMA_STATS_STRING_ENABLED
6663  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6664 #endif
6665 
6666  virtual bool CreateAllocationRequest(
6667  uint32_t currentFrameIndex,
6668  uint32_t frameInUseCount,
6669  VkDeviceSize bufferImageGranularity,
6670  VkDeviceSize allocSize,
6671  VkDeviceSize allocAlignment,
6672  bool upperAddress,
6673  VmaSuballocationType allocType,
6674  bool canMakeOtherLost,
6675  uint32_t strategy,
6676  VmaAllocationRequest* pAllocationRequest);
6677 
6678  virtual bool MakeRequestedAllocationsLost(
6679  uint32_t currentFrameIndex,
6680  uint32_t frameInUseCount,
6681  VmaAllocationRequest* pAllocationRequest);
6682 
6683  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6684 
6685  virtual VkResult CheckCorruption(const void* pBlockData);
6686 
6687  virtual void Alloc(
6688  const VmaAllocationRequest& request,
6689  VmaSuballocationType type,
6690  VkDeviceSize allocSize,
6691  VmaAllocation hAllocation);
6692 
6693  virtual void Free(const VmaAllocation allocation);
6694  virtual void FreeAtOffset(VkDeviceSize offset);
6695 
6696 private:
6697  /*
6698  There are two suballocation vectors, used in ping-pong way.
6699  The one with index m_1stVectorIndex is called 1st.
6700  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6701  2nd can be non-empty only when 1st is not empty.
6702  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6703  */
6704  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6705 
6706  enum SECOND_VECTOR_MODE
6707  {
6708  SECOND_VECTOR_EMPTY,
6709  /*
6710  Suballocations in 2nd vector are created later than the ones in 1st, but they
6711  all have smaller offset.
6712  */
6713  SECOND_VECTOR_RING_BUFFER,
6714  /*
6715  Suballocations in 2nd vector are upper side of double stack.
6716  They all have offsets higher than those in 1st vector.
6717  Top of this stack means smaller offsets, but higher indices in this vector.
6718  */
6719  SECOND_VECTOR_DOUBLE_STACK,
6720  };
6721 
6722  VkDeviceSize m_SumFreeSize;
6723  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6724  uint32_t m_1stVectorIndex;
6725  SECOND_VECTOR_MODE m_2ndVectorMode;
6726 
6727  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6728  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6729  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6730  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6731 
6732  // Number of items in 1st vector with hAllocation = null at the beginning.
6733  size_t m_1stNullItemsBeginCount;
6734  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6735  size_t m_1stNullItemsMiddleCount;
6736  // Number of items in 2nd vector with hAllocation = null.
6737  size_t m_2ndNullItemsCount;
6738 
6739  bool ShouldCompact1st() const;
6740  void CleanupAfterFree();
6741 
6742  bool CreateAllocationRequest_LowerAddress(
6743  uint32_t currentFrameIndex,
6744  uint32_t frameInUseCount,
6745  VkDeviceSize bufferImageGranularity,
6746  VkDeviceSize allocSize,
6747  VkDeviceSize allocAlignment,
6748  VmaSuballocationType allocType,
6749  bool canMakeOtherLost,
6750  uint32_t strategy,
6751  VmaAllocationRequest* pAllocationRequest);
6752  bool CreateAllocationRequest_UpperAddress(
6753  uint32_t currentFrameIndex,
6754  uint32_t frameInUseCount,
6755  VkDeviceSize bufferImageGranularity,
6756  VkDeviceSize allocSize,
6757  VkDeviceSize allocAlignment,
6758  VmaSuballocationType allocType,
6759  bool canMakeOtherLost,
6760  uint32_t strategy,
6761  VmaAllocationRequest* pAllocationRequest);
6762 };
6763 
6764 /*
6765 - GetSize() is the original size of the allocated memory block.
6766 - m_UsableSize is this size aligned down to a power of two.
6767  All allocations and calculations happen relative to m_UsableSize.
6768 - GetUnusableSize() is the difference between them.
6769  It is reported as a separate, unused range, not available for allocations.
6770 
6771 Node at level 0 has size = m_UsableSize.
6772 Each successive level contains nodes half the size of those on the previous level.
6773 m_LevelCount is the maximum number of levels to use in the current object.
6774 */
6775 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6776 {
6777  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6778 public:
6779  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6780  virtual ~VmaBlockMetadata_Buddy();
6781  virtual void Init(VkDeviceSize size);
6782 
6783  virtual bool Validate() const;
6784  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6785  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6786  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6787  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6788 
6789  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6790  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6791 
6792 #if VMA_STATS_STRING_ENABLED
6793  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6794 #endif
6795 
6796  virtual bool CreateAllocationRequest(
6797  uint32_t currentFrameIndex,
6798  uint32_t frameInUseCount,
6799  VkDeviceSize bufferImageGranularity,
6800  VkDeviceSize allocSize,
6801  VkDeviceSize allocAlignment,
6802  bool upperAddress,
6803  VmaSuballocationType allocType,
6804  bool canMakeOtherLost,
6805  uint32_t strategy,
6806  VmaAllocationRequest* pAllocationRequest);
6807 
6808  virtual bool MakeRequestedAllocationsLost(
6809  uint32_t currentFrameIndex,
6810  uint32_t frameInUseCount,
6811  VmaAllocationRequest* pAllocationRequest);
6812 
6813  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6814 
6815  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6816 
6817  virtual void Alloc(
6818  const VmaAllocationRequest& request,
6819  VmaSuballocationType type,
6820  VkDeviceSize allocSize,
6821  VmaAllocation hAllocation);
6822 
6823  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6824  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6825 
6826 private:
6827  static const VkDeviceSize MIN_NODE_SIZE = 32;
6828  static const size_t MAX_LEVELS = 30;
6829 
6830  struct ValidationContext
6831  {
6832  size_t calculatedAllocationCount;
6833  size_t calculatedFreeCount;
6834  VkDeviceSize calculatedSumFreeSize;
6835 
6836  ValidationContext() :
6837  calculatedAllocationCount(0),
6838  calculatedFreeCount(0),
6839  calculatedSumFreeSize(0) { }
6840  };
6841 
6842  struct Node
6843  {
6844  VkDeviceSize offset;
6845  enum TYPE
6846  {
6847  TYPE_FREE,
6848  TYPE_ALLOCATION,
6849  TYPE_SPLIT,
6850  TYPE_COUNT
6851  } type;
6852  Node* parent;
6853  Node* buddy;
6854 
6855  union
6856  {
6857  struct
6858  {
6859  Node* prev;
6860  Node* next;
6861  } free;
6862  struct
6863  {
6864  VmaAllocation alloc;
6865  } allocation;
6866  struct
6867  {
6868  Node* leftChild;
6869  } split;
6870  };
6871  };
6872 
6873  // Size of the memory block aligned down to a power of two.
6874  VkDeviceSize m_UsableSize;
6875  uint32_t m_LevelCount;
6876 
6877  Node* m_Root;
6878  struct {
6879  Node* front;
6880  Node* back;
6881  } m_FreeList[MAX_LEVELS];
6882  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6883  size_t m_AllocationCount;
6884  // Number of nodes in the tree with type == TYPE_FREE.
6885  size_t m_FreeCount;
6886  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6887  VkDeviceSize m_SumFreeSize;
6888 
6889  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6890  void DeleteNode(Node* node);
6891  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6892  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6893  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
6894  // Alloc passed just for validation. Can be null.
6895  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6896  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6897  // Adds node to the front of FreeList at given level.
6898  // node->type must be FREE.
6899  // node->free.prev, next can be undefined.
6900  void AddToFreeListFront(uint32_t level, Node* node);
6901  // Removes node from FreeList at given level.
6902  // node->type must be FREE.
6903  // node->free.prev, next stay untouched.
6904  void RemoveFromFreeList(uint32_t level, Node* node);
6905 
6906 #if VMA_STATS_STRING_ENABLED
6907  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6908 #endif
6909 };
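// --- Illustrative sketch (editorial, not part of the library) ----------------
// How the buddy metadata above maps an allocation size to a tree level,
// assuming m_UsableSize is the block size rounded down to a power of two and
// LevelToNodeSize(level) == m_UsableSize >> level, as declared in the class.
// The function name and parameters are hypothetical stand-ins.
static uint32_t AllocSizeToLevelSketch(
    VkDeviceSize usableSize,   // m_UsableSize: a power of two
    VkDeviceSize allocSize,    // requested size (<= usableSize)
    VkDeviceSize minNodeSize)  // MIN_NODE_SIZE: smallest node allowed
{
    uint32_t level = 0;
    VkDeviceSize nodeSize = usableSize;
    // Descend while the child node (half the current size) still fits the
    // request and does not fall below the minimum node size.
    while(nodeSize / 2 >= allocSize && nodeSize / 2 >= minNodeSize)
    {
        nodeSize /= 2;
        ++level;
    }
    return level;
}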
6910 
6911 /*
6912 Represents a single block of device memory (`VkDeviceMemory`) with all the
6913 data about its regions (aka suballocations, #VmaAllocation), assigned and free.
6914 
6915 Thread-safety: This class must be externally synchronized.
6916 */
6917 class VmaDeviceMemoryBlock
6918 {
6919  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6920 public:
6921  VmaBlockMetadata* m_pMetadata;
6922 
6923  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6924 
6925  ~VmaDeviceMemoryBlock()
6926  {
6927  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6928  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6929  }
6930 
6931  // Always call after construction.
6932  void Init(
6933  VmaAllocator hAllocator,
6934  VmaPool hParentPool,
6935  uint32_t newMemoryTypeIndex,
6936  VkDeviceMemory newMemory,
6937  VkDeviceSize newSize,
6938  uint32_t id,
6939  uint32_t algorithm);
6940  // Always call before destruction.
6941  void Destroy(VmaAllocator allocator);
6942 
6943  VmaPool GetParentPool() const { return m_hParentPool; }
6944  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6945  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6946  uint32_t GetId() const { return m_Id; }
6947  void* GetMappedData() const { return m_pMappedData; }
6948 
6949  // Validates all data structures inside this object. If not valid, returns false.
6950  bool Validate() const;
6951 
6952  VkResult CheckCorruption(VmaAllocator hAllocator);
6953 
6954  // ppData can be null.
6955  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6956  void Unmap(VmaAllocator hAllocator, uint32_t count);
6957 
6958  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6959  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6960 
6961  VkResult BindBufferMemory(
6962  const VmaAllocator hAllocator,
6963  const VmaAllocation hAllocation,
6964  VkDeviceSize allocationLocalOffset,
6965  VkBuffer hBuffer,
6966  const void* pNext);
6967  VkResult BindImageMemory(
6968  const VmaAllocator hAllocator,
6969  const VmaAllocation hAllocation,
6970  VkDeviceSize allocationLocalOffset,
6971  VkImage hImage,
6972  const void* pNext);
6973 
6974 private:
6975  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block does not belong to a custom pool.
6976  uint32_t m_MemoryTypeIndex;
6977  uint32_t m_Id;
6978  VkDeviceMemory m_hMemory;
6979 
6980  /*
6981  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6982  Also protects m_MapCount, m_pMappedData.
6983  Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
6984  */
6985  VMA_MUTEX m_Mutex;
6986  uint32_t m_MapCount;
6987  void* m_pMappedData;
6988 };
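// --- Illustrative sketch (editorial, hypothetical) ---------------------------
// The reference-counted mapping scheme behind VmaDeviceMemoryBlock::Map/Unmap:
// only the 0 -> 1 transition calls vkMapMemory and only 1 -> 0 calls
// vkUnmapMemory; intermediate calls just adjust the counter (in the real class
// this happens under m_Mutex). This standalone model omits the allocator,
// mutex, and most error handling.
struct MappedBlockSketch
{
    VkDevice device;
    VkDeviceMemory memory;
    uint32_t mapCount = 0;
    void* pMappedData = nullptr;

    VkResult Map(void** ppData)
    {
        if(mapCount == 0)
        {
            const VkResult res = vkMapMemory(device, memory, 0, VK_WHOLE_SIZE, 0, &pMappedData);
            if(res != VK_SUCCESS)
                return res;
        }
        ++mapCount;
        if(ppData != nullptr)
            *ppData = pMappedData; // ppData can be null, as in the real Map().
        return VK_SUCCESS;
    }
    void Unmap()
    {
        if(--mapCount == 0)
        {
            vkUnmapMemory(device, memory);
            pMappedData = nullptr;
        }
    }
};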
6989 
6990 struct VmaPointerLess
6991 {
6992  bool operator()(const void* lhs, const void* rhs) const
6993  {
6994  return lhs < rhs;
6995  }
6996 };
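// --- Illustrative usage sketch (editorial, hypothetical) ---------------------
// VmaPointerLess provides the strict weak ordering used to keep vectors of
// handles sorted by raw pointer value (e.g. the pool list described later as
// "Sorted by pointer value"), which enables binary search:
#include <algorithm>

static bool ContainsSortedSketch(
    const VmaVector<VmaPool, VmaStlAllocator<VmaPool> >& sortedPools,
    VmaPool pool)
{
    return std::binary_search(sortedPools.begin(), sortedPools.end(), pool, VmaPointerLess());
}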
6997 
6998 struct VmaDefragmentationMove
6999 {
7000  size_t srcBlockIndex;
7001  size_t dstBlockIndex;
7002  VkDeviceSize srcOffset;
7003  VkDeviceSize dstOffset;
7004  VkDeviceSize size;
7005  VmaAllocation hAllocation;
7006  VmaDeviceMemoryBlock* pSrcBlock;
7007  VmaDeviceMemoryBlock* pDstBlock;
7008 };
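// --- Illustrative sketch (editorial, hypothetical) ---------------------------
// Conceptually, a CPU-side defragmentation pass applies one
// VmaDefragmentationMove as a copy between the two mapped blocks. The real
// ApplyDefragmentationMovesCpu also handles mapping, flushing, and metadata
// updates; this only shows the core byte move.
#include <cstring>

static void ApplyMoveCpuSketch(
    void* pSrcBlockData, void* pDstBlockData, const VmaDefragmentationMove& move)
{
    // memmove rather than memcpy: when srcBlock == dstBlock the two ranges
    // may overlap.
    memmove(
        static_cast<char*>(pDstBlockData) + move.dstOffset,
        static_cast<char*>(pSrcBlockData) + move.srcOffset,
        static_cast<size_t>(move.size));
}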
7009 
7010 class VmaDefragmentationAlgorithm;
7011 
7012 /*
7013 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
7014 Vulkan memory type.
7015 
7016 Synchronized internally with a mutex.
7017 */
7018 struct VmaBlockVector
7019 {
7020  VMA_CLASS_NO_COPY(VmaBlockVector)
7021 public:
7022  VmaBlockVector(
7023  VmaAllocator hAllocator,
7024  VmaPool hParentPool,
7025  uint32_t memoryTypeIndex,
7026  VkDeviceSize preferredBlockSize,
7027  size_t minBlockCount,
7028  size_t maxBlockCount,
7029  VkDeviceSize bufferImageGranularity,
7030  uint32_t frameInUseCount,
7031  bool explicitBlockSize,
7032  uint32_t algorithm);
7033  ~VmaBlockVector();
7034 
7035  VkResult CreateMinBlocks();
7036 
7037  VmaAllocator GetAllocator() const { return m_hAllocator; }
7038  VmaPool GetParentPool() const { return m_hParentPool; }
7039  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
7040  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7041  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
7042  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
7043  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
7044  uint32_t GetAlgorithm() const { return m_Algorithm; }
7045 
7046  void GetPoolStats(VmaPoolStats* pStats);
7047 
7048  bool IsEmpty();
7049  bool IsCorruptionDetectionEnabled() const;
7050 
7051  VkResult Allocate(
7052  uint32_t currentFrameIndex,
7053  VkDeviceSize size,
7054  VkDeviceSize alignment,
7055  const VmaAllocationCreateInfo& createInfo,
7056  VmaSuballocationType suballocType,
7057  size_t allocationCount,
7058  VmaAllocation* pAllocations);
7059 
7060  void Free(const VmaAllocation hAllocation);
7061 
7062  // Adds statistics of this BlockVector to pStats.
7063  void AddStats(VmaStats* pStats);
7064 
7065 #if VMA_STATS_STRING_ENABLED
7066  void PrintDetailedMap(class VmaJsonWriter& json);
7067 #endif
7068 
7069  void MakePoolAllocationsLost(
7070  uint32_t currentFrameIndex,
7071  size_t* pLostAllocationCount);
7072  VkResult CheckCorruption();
7073 
7074  // Saves results in pCtx->res.
7075  void Defragment(
7076  class VmaBlockVectorDefragmentationContext* pCtx,
7077  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
7078  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
7079  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
7080  VkCommandBuffer commandBuffer);
7081  void DefragmentationEnd(
7082  class VmaBlockVectorDefragmentationContext* pCtx,
7083  uint32_t flags,
7084  VmaDefragmentationStats* pStats);
7085 
7086  uint32_t ProcessDefragmentations(
7087  class VmaBlockVectorDefragmentationContext *pCtx,
7088  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
7089 
7090  void CommitDefragmentations(
7091  class VmaBlockVectorDefragmentationContext *pCtx,
7092  VmaDefragmentationStats* pStats);
7093 
7095  // To be used only while the m_Mutex is locked. Used during defragmentation.
7096 
7097  size_t GetBlockCount() const { return m_Blocks.size(); }
7098  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
7099  size_t CalcAllocationCount() const;
7100  bool IsBufferImageGranularityConflictPossible() const;
7101 
7102 private:
7103  friend class VmaDefragmentationAlgorithm_Generic;
7104 
7105  const VmaAllocator m_hAllocator;
7106  const VmaPool m_hParentPool;
7107  const uint32_t m_MemoryTypeIndex;
7108  const VkDeviceSize m_PreferredBlockSize;
7109  const size_t m_MinBlockCount;
7110  const size_t m_MaxBlockCount;
7111  const VkDeviceSize m_BufferImageGranularity;
7112  const uint32_t m_FrameInUseCount;
7113  const bool m_ExplicitBlockSize;
7114  const uint32_t m_Algorithm;
7115  VMA_RW_MUTEX m_Mutex;
7116 
7117  /* There can be at most one block that is completely empty (except when minBlockCount > 0) -
7118  a hysteresis to avoid the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
7119  bool m_HasEmptyBlock;
7120  // Incrementally sorted by sumFreeSize, ascending.
7121  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
7122  uint32_t m_NextBlockId;
7123 
7124  VkDeviceSize CalcMaxBlockSize() const;
7125 
7126  // Finds and removes given block from vector.
7127  void Remove(VmaDeviceMemoryBlock* pBlock);
7128 
7129  // Performs a single step in sorting m_Blocks. They may not be fully sorted
7130  // after this call.
7131  void IncrementallySortBlocks();
7132 
7133  VkResult AllocatePage(
7134  uint32_t currentFrameIndex,
7135  VkDeviceSize size,
7136  VkDeviceSize alignment,
7137  const VmaAllocationCreateInfo& createInfo,
7138  VmaSuballocationType suballocType,
7139  VmaAllocation* pAllocation);
7140 
7141  // To be used only without CAN_MAKE_OTHER_LOST flag.
7142  VkResult AllocateFromBlock(
7143  VmaDeviceMemoryBlock* pBlock,
7144  uint32_t currentFrameIndex,
7145  VkDeviceSize size,
7146  VkDeviceSize alignment,
7147  VmaAllocationCreateFlags allocFlags,
7148  void* pUserData,
7149  VmaSuballocationType suballocType,
7150  uint32_t strategy,
7151  VmaAllocation* pAllocation);
7152 
7153  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
7154 
7155  // Saves result to pCtx->res.
7156  void ApplyDefragmentationMovesCpu(
7157  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7158  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
7159  // Saves result to pCtx->res.
7160  void ApplyDefragmentationMovesGpu(
7161  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7162  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7163  VkCommandBuffer commandBuffer);
7164 
7165  /*
7166  Used during defragmentation. pDefragmentationStats is optional: if not null,
7167  it is treated as in/out and updated with new data.
7168  */
7169  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7170 
7171  void UpdateHasEmptyBlock();
7172 };
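// --- Illustrative sketch (editorial, hypothetical) ---------------------------
// What "incrementally sorted by sumFreeSize, ascending" means for m_Blocks:
// IncrementallySortBlocks() performs only a small amount of work per call, so
// the vector converges toward sorted order over time instead of paying for a
// full sort on every allocation/free. A standalone model of one such step,
// doing at most one adjacent swap:
#include <utility>

template<typename BlockPtrVector>
static void IncrementalSortStepSketch(BlockPtrVector& blocks)
{
    for(size_t i = 1; i < blocks.size(); ++i)
    {
        if(blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
           blocks[i]->m_pMetadata->GetSumFreeSize())
        {
            std::swap(blocks[i - 1], blocks[i]);
            return; // At most one swap per step.
        }
    }
}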
7173 
7174 struct VmaPool_T
7175 {
7176  VMA_CLASS_NO_COPY(VmaPool_T)
7177 public:
7178  VmaBlockVector m_BlockVector;
7179 
7180  VmaPool_T(
7181  VmaAllocator hAllocator,
7182  const VmaPoolCreateInfo& createInfo,
7183  VkDeviceSize preferredBlockSize);
7184  ~VmaPool_T();
7185 
7186  uint32_t GetId() const { return m_Id; }
7187  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
7188 
7189  const char* GetName() const { return m_Name; }
7190  void SetName(const char* pName);
7191 
7192 #if VMA_STATS_STRING_ENABLED
7193  //void PrintDetailedMap(class VmaStringBuilder& sb);
7194 #endif
7195 
7196 private:
7197  uint32_t m_Id;
7198  char* m_Name;
7199 };
7200 
7201 /*
7202 Performs defragmentation:
7203 
7204 - Updates `pBlockVector->m_pMetadata`.
7205 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7206 - Does not move actual data, only returns requested moves as `moves`.
7207 */
7208 class VmaDefragmentationAlgorithm
7209 {
7210  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7211 public:
7212  VmaDefragmentationAlgorithm(
7213  VmaAllocator hAllocator,
7214  VmaBlockVector* pBlockVector,
7215  uint32_t currentFrameIndex) :
7216  m_hAllocator(hAllocator),
7217  m_pBlockVector(pBlockVector),
7218  m_CurrentFrameIndex(currentFrameIndex)
7219  {
7220  }
7221  virtual ~VmaDefragmentationAlgorithm()
7222  {
7223  }
7224 
7225  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7226  virtual void AddAll() = 0;
7227 
7228  virtual VkResult Defragment(
7229  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7230  VkDeviceSize maxBytesToMove,
7231  uint32_t maxAllocationsToMove,
7232  VmaDefragmentationFlags flags) = 0;
7233 
7234  virtual VkDeviceSize GetBytesMoved() const = 0;
7235  virtual uint32_t GetAllocationsMoved() const = 0;
7236 
7237 protected:
7238  VmaAllocator const m_hAllocator;
7239  VmaBlockVector* const m_pBlockVector;
7240  const uint32_t m_CurrentFrameIndex;
7241 
7242  struct AllocationInfo
7243  {
7244  VmaAllocation m_hAllocation;
7245  VkBool32* m_pChanged;
7246 
7247  AllocationInfo() :
7248  m_hAllocation(VK_NULL_HANDLE),
7249  m_pChanged(VMA_NULL)
7250  {
7251  }
7252  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7253  m_hAllocation(hAlloc),
7254  m_pChanged(pChanged)
7255  {
7256  }
7257  };
7258 };
7259 
7260 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7261 {
7262  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7263 public:
7264  VmaDefragmentationAlgorithm_Generic(
7265  VmaAllocator hAllocator,
7266  VmaBlockVector* pBlockVector,
7267  uint32_t currentFrameIndex,
7268  bool overlappingMoveSupported);
7269  virtual ~VmaDefragmentationAlgorithm_Generic();
7270 
7271  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7272  virtual void AddAll() { m_AllAllocations = true; }
7273 
7274  virtual VkResult Defragment(
7275  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7276  VkDeviceSize maxBytesToMove,
7277  uint32_t maxAllocationsToMove,
7278  VmaDefragmentationFlags flags);
7279 
7280  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7281  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7282 
7283 private:
7284  uint32_t m_AllocationCount;
7285  bool m_AllAllocations;
7286 
7287  VkDeviceSize m_BytesMoved;
7288  uint32_t m_AllocationsMoved;
7289 
7290  struct AllocationInfoSizeGreater
7291  {
7292  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7293  {
7294  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7295  }
7296  };
7297 
7298  struct AllocationInfoOffsetGreater
7299  {
7300  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7301  {
7302  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7303  }
7304  };
7305 
7306  struct BlockInfo
7307  {
7308  size_t m_OriginalBlockIndex;
7309  VmaDeviceMemoryBlock* m_pBlock;
7310  bool m_HasNonMovableAllocations;
7311  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7312 
7313  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7314  m_OriginalBlockIndex(SIZE_MAX),
7315  m_pBlock(VMA_NULL),
7316  m_HasNonMovableAllocations(true),
7317  m_Allocations(pAllocationCallbacks)
7318  {
7319  }
7320 
7321  void CalcHasNonMovableAllocations()
7322  {
7323  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7324  const size_t defragmentAllocCount = m_Allocations.size();
7325  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7326  }
7327 
7328  void SortAllocationsBySizeDescending()
7329  {
7330  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7331  }
7332 
7333  void SortAllocationsByOffsetDescending()
7334  {
7335  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7336  }
7337  };
7338 
7339  struct BlockPointerLess
7340  {
7341  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7342  {
7343  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7344  }
7345  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7346  {
7347  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7348  }
7349  };
7350 
7351  // 1. Blocks with some non-movable allocations go first.
7352  // 2. Blocks with smaller sumFreeSize go first.
7353  struct BlockInfoCompareMoveDestination
7354  {
7355  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7356  {
7357  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7358  {
7359  return true;
7360  }
7361  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7362  {
7363  return false;
7364  }
7365  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7366  {
7367  return true;
7368  }
7369  return false;
7370  }
7371  };
7372 
7373  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7374  BlockInfoVector m_Blocks;
7375 
7376  VkResult DefragmentRound(
7377  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7378  VkDeviceSize maxBytesToMove,
7379  uint32_t maxAllocationsToMove,
7380  bool freeOldAllocations);
7381 
7382  size_t CalcBlocksWithNonMovableCount() const;
7383 
7384  static bool MoveMakesSense(
7385  size_t dstBlockIndex, VkDeviceSize dstOffset,
7386  size_t srcBlockIndex, VkDeviceSize srcOffset);
7387 };
7388 
7389 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7390 {
7391  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7392 public:
7393  VmaDefragmentationAlgorithm_Fast(
7394  VmaAllocator hAllocator,
7395  VmaBlockVector* pBlockVector,
7396  uint32_t currentFrameIndex,
7397  bool overlappingMoveSupported);
7398  virtual ~VmaDefragmentationAlgorithm_Fast();
7399 
7400  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7401  virtual void AddAll() { m_AllAllocations = true; }
7402 
7403  virtual VkResult Defragment(
7404  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7405  VkDeviceSize maxBytesToMove,
7406  uint32_t maxAllocationsToMove,
7407  VmaDefragmentationFlags flags);
7408 
7409  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7410  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7411 
7412 private:
7413  struct BlockInfo
7414  {
7415  size_t origBlockIndex;
7416  };
7417 
7418  class FreeSpaceDatabase
7419  {
7420  public:
7421  FreeSpaceDatabase()
7422  {
7423  FreeSpace s = {};
7424  s.blockInfoIndex = SIZE_MAX;
7425  for(size_t i = 0; i < MAX_COUNT; ++i)
7426  {
7427  m_FreeSpaces[i] = s;
7428  }
7429  }
7430 
7431  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7432  {
7433  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7434  {
7435  return;
7436  }
7437 
7438  // Find first invalid or the smallest structure.
7439  size_t bestIndex = SIZE_MAX;
7440  for(size_t i = 0; i < MAX_COUNT; ++i)
7441  {
7442  // Empty structure.
7443  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7444  {
7445  bestIndex = i;
7446  break;
7447  }
7448  if(m_FreeSpaces[i].size < size &&
7449  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7450  {
7451  bestIndex = i;
7452  }
7453  }
7454 
7455  if(bestIndex != SIZE_MAX)
7456  {
7457  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7458  m_FreeSpaces[bestIndex].offset = offset;
7459  m_FreeSpaces[bestIndex].size = size;
7460  }
7461  }
7462 
7463  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7464  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7465  {
7466  size_t bestIndex = SIZE_MAX;
7467  VkDeviceSize bestFreeSpaceAfter = 0;
7468  for(size_t i = 0; i < MAX_COUNT; ++i)
7469  {
7470  // Structure is valid.
7471  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7472  {
7473  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7474  // Allocation fits into this structure.
7475  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7476  {
7477  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7478  (dstOffset + size);
7479  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7480  {
7481  bestIndex = i;
7482  bestFreeSpaceAfter = freeSpaceAfter;
7483  }
7484  }
7485  }
7486  }
7487 
7488  if(bestIndex != SIZE_MAX)
7489  {
7490  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7491  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7492 
7493  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7494  {
7495  // Leave this structure for remaining empty space.
7496  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7497  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7498  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7499  }
7500  else
7501  {
7502  // This structure becomes invalid.
7503  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7504  }
7505 
7506  return true;
7507  }
7508 
7509  return false;
7510  }
7511 
7512  private:
7513  static const size_t MAX_COUNT = 4;
7514 
7515  struct FreeSpace
7516  {
7517  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7518  VkDeviceSize offset;
7519  VkDeviceSize size;
7520  } m_FreeSpaces[MAX_COUNT];
7521  };
7522 
7523  const bool m_OverlappingMoveSupported;
7524 
7525  uint32_t m_AllocationCount;
7526  bool m_AllAllocations;
7527 
7528  VkDeviceSize m_BytesMoved;
7529  uint32_t m_AllocationsMoved;
7530 
7531  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7532 
7533  void PreprocessMetadata();
7534  void PostprocessMetadata();
7535  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7536 };
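// --- Illustrative sketch (editorial, hypothetical) ---------------------------
// The FreeSpaceDatabase above relies on aligning a candidate offset up before
// testing the fit. VmaAlignUp (defined earlier in this file) implements that
// rounding; for a power-of-two alignment it is equivalent to this standalone
// helper:
static inline VkDeviceSize AlignUpPow2Sketch(VkDeviceSize value, VkDeviceSize alignment)
{
    // alignment must be a power of two for the mask trick to be valid.
    return (value + alignment - 1) & ~(alignment - 1);
}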
7537 
7538 struct VmaBlockDefragmentationContext
7539 {
7540  enum BLOCK_FLAG
7541  {
7542  BLOCK_FLAG_USED = 0x00000001,
7543  };
7544  uint32_t flags;
7545  VkBuffer hBuffer;
7546 };
7547 
7548 class VmaBlockVectorDefragmentationContext
7549 {
7550  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7551 public:
7552  VkResult res;
7553  bool mutexLocked;
7554  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7555  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7556  uint32_t defragmentationMovesProcessed;
7557  uint32_t defragmentationMovesCommitted;
7558  bool hasDefragmentationPlan;
7559 
7560  VmaBlockVectorDefragmentationContext(
7561  VmaAllocator hAllocator,
7562  VmaPool hCustomPool, // Optional.
7563  VmaBlockVector* pBlockVector,
7564  uint32_t currFrameIndex);
7565  ~VmaBlockVectorDefragmentationContext();
7566 
7567  VmaPool GetCustomPool() const { return m_hCustomPool; }
7568  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7569  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7570 
7571  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7572  void AddAll() { m_AllAllocations = true; }
7573 
7574  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7575 
7576 private:
7577  const VmaAllocator m_hAllocator;
7578  // Null if not from custom pool.
7579  const VmaPool m_hCustomPool;
7580  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7581  VmaBlockVector* const m_pBlockVector;
7582  const uint32_t m_CurrFrameIndex;
7583  // Owner of this object.
7584  VmaDefragmentationAlgorithm* m_pAlgorithm;
7585 
7586  struct AllocInfo
7587  {
7588  VmaAllocation hAlloc;
7589  VkBool32* pChanged;
7590  };
7591  // Used between constructor and Begin.
7592  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7593  bool m_AllAllocations;
7594 };
7595 
7596 struct VmaDefragmentationContext_T
7597 {
7598 private:
7599  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7600 public:
7601  VmaDefragmentationContext_T(
7602  VmaAllocator hAllocator,
7603  uint32_t currFrameIndex,
7604  uint32_t flags,
7605  VmaDefragmentationStats* pStats);
7606  ~VmaDefragmentationContext_T();
7607 
7608  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7609  void AddAllocations(
7610  uint32_t allocationCount,
7611  const VmaAllocation* pAllocations,
7612  VkBool32* pAllocationsChanged);
7613 
7614  /*
7615  Returns:
7616  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
7617  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
7618  - A negative value if an error occurred; the object can be destroyed immediately.
7619  */
7620  VkResult Defragment(
7621  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7622  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7623  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
7624 
7625  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7626  VkResult DefragmentPassEnd();
7627 
7628 private:
7629  const VmaAllocator m_hAllocator;
7630  const uint32_t m_CurrFrameIndex;
7631  const uint32_t m_Flags;
7632  VmaDefragmentationStats* const m_pStats;
7633 
7634  VkDeviceSize m_MaxCpuBytesToMove;
7635  uint32_t m_MaxCpuAllocationsToMove;
7636  VkDeviceSize m_MaxGpuBytesToMove;
7637  uint32_t m_MaxGpuAllocationsToMove;
7638 
7639  // Owner of these objects.
7640  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7641  // Owner of these objects.
7642  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7643 };
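// --- Illustrative sketch (editorial, hypothetical caller) --------------------
// How a caller might act on the Defragment() return contract documented above.
// The helper name is a placeholder.
static bool KeepContextAliveSketch(VkResult res)
{
    // VK_SUCCESS and negative (error) results allow immediate destruction;
    // VK_NOT_READY means the context must live until vmaDefragmentationEnd().
    return res == VK_NOT_READY;
}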
7644 
7645 #if VMA_RECORDING_ENABLED
7646 
7647 class VmaRecorder
7648 {
7649 public:
7650  VmaRecorder();
7651  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7652  void WriteConfiguration(
7653  const VkPhysicalDeviceProperties& devProps,
7654  const VkPhysicalDeviceMemoryProperties& memProps,
7655  uint32_t vulkanApiVersion,
7656  bool dedicatedAllocationExtensionEnabled,
7657  bool bindMemory2ExtensionEnabled,
7658  bool memoryBudgetExtensionEnabled,
7659  bool deviceCoherentMemoryExtensionEnabled);
7660  ~VmaRecorder();
7661 
7662  void RecordCreateAllocator(uint32_t frameIndex);
7663  void RecordDestroyAllocator(uint32_t frameIndex);
7664  void RecordCreatePool(uint32_t frameIndex,
7665  const VmaPoolCreateInfo& createInfo,
7666  VmaPool pool);
7667  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7668  void RecordAllocateMemory(uint32_t frameIndex,
7669  const VkMemoryRequirements& vkMemReq,
7670  const VmaAllocationCreateInfo& createInfo,
7671  VmaAllocation allocation);
7672  void RecordAllocateMemoryPages(uint32_t frameIndex,
7673  const VkMemoryRequirements& vkMemReq,
7674  const VmaAllocationCreateInfo& createInfo,
7675  uint64_t allocationCount,
7676  const VmaAllocation* pAllocations);
7677  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7678  const VkMemoryRequirements& vkMemReq,
7679  bool requiresDedicatedAllocation,
7680  bool prefersDedicatedAllocation,
7681  const VmaAllocationCreateInfo& createInfo,
7682  VmaAllocation allocation);
7683  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7684  const VkMemoryRequirements& vkMemReq,
7685  bool requiresDedicatedAllocation,
7686  bool prefersDedicatedAllocation,
7687  const VmaAllocationCreateInfo& createInfo,
7688  VmaAllocation allocation);
7689  void RecordFreeMemory(uint32_t frameIndex,
7690  VmaAllocation allocation);
7691  void RecordFreeMemoryPages(uint32_t frameIndex,
7692  uint64_t allocationCount,
7693  const VmaAllocation* pAllocations);
7694  void RecordSetAllocationUserData(uint32_t frameIndex,
7695  VmaAllocation allocation,
7696  const void* pUserData);
7697  void RecordCreateLostAllocation(uint32_t frameIndex,
7698  VmaAllocation allocation);
7699  void RecordMapMemory(uint32_t frameIndex,
7700  VmaAllocation allocation);
7701  void RecordUnmapMemory(uint32_t frameIndex,
7702  VmaAllocation allocation);
7703  void RecordFlushAllocation(uint32_t frameIndex,
7704  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7705  void RecordInvalidateAllocation(uint32_t frameIndex,
7706  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7707  void RecordCreateBuffer(uint32_t frameIndex,
7708  const VkBufferCreateInfo& bufCreateInfo,
7709  const VmaAllocationCreateInfo& allocCreateInfo,
7710  VmaAllocation allocation);
7711  void RecordCreateImage(uint32_t frameIndex,
7712  const VkImageCreateInfo& imageCreateInfo,
7713  const VmaAllocationCreateInfo& allocCreateInfo,
7714  VmaAllocation allocation);
7715  void RecordDestroyBuffer(uint32_t frameIndex,
7716  VmaAllocation allocation);
7717  void RecordDestroyImage(uint32_t frameIndex,
7718  VmaAllocation allocation);
7719  void RecordTouchAllocation(uint32_t frameIndex,
7720  VmaAllocation allocation);
7721  void RecordGetAllocationInfo(uint32_t frameIndex,
7722  VmaAllocation allocation);
7723  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7724  VmaPool pool);
7725  void RecordDefragmentationBegin(uint32_t frameIndex,
7726  const VmaDefragmentationInfo2& info,
7727  VmaDefragmentationContext ctx);
7728  void RecordDefragmentationEnd(uint32_t frameIndex,
7729  VmaDefragmentationContext ctx);
7730  void RecordSetPoolName(uint32_t frameIndex,
7731  VmaPool pool,
7732  const char* name);
7733 
7734 private:
7735  struct CallParams
7736  {
7737  uint32_t threadId;
7738  double time;
7739  };
7740 
7741  class UserDataString
7742  {
7743  public:
7744  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7745  const char* GetString() const { return m_Str; }
7746 
7747  private:
7748  char m_PtrStr[17];
7749  const char* m_Str;
7750  };
7751 
7752  bool m_UseMutex;
7753  VmaRecordFlags m_Flags;
7754  FILE* m_File;
7755  VMA_MUTEX m_FileMutex;
7756  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
7757 
7758  void GetBasicParams(CallParams& outParams);
7759 
7760  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7761  template<typename T>
7762  void PrintPointerList(uint64_t count, const T* pItems)
7763  {
7764  if(count)
7765  {
7766  fprintf(m_File, "%p", pItems[0]);
7767  for(uint64_t i = 1; i < count; ++i)
7768  {
7769  fprintf(m_File, " %p", pItems[i]);
7770  }
7771  }
7772  }
7773 
7774  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7775  void Flush();
7776 };
7777 
7778 #endif // #if VMA_RECORDING_ENABLED
7779 
7780 /*
7781 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7782 */
7783 class VmaAllocationObjectAllocator
7784 {
7785  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7786 public:
7787  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7788 
7789  template<typename... Types> VmaAllocation Allocate(Types... args);
7790  void Free(VmaAllocation hAlloc);
7791 
7792 private:
7793  VMA_MUTEX m_Mutex;
7794  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7795 };
7796 
7797 struct VmaCurrentBudgetData
7798 {
7799  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7800  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7801 
7802 #if VMA_MEMORY_BUDGET
7803  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7804  VMA_RW_MUTEX m_BudgetMutex;
7805  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7806  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7807  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7808 #endif // #if VMA_MEMORY_BUDGET
7809 
7810  VmaCurrentBudgetData()
7811  {
7812  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7813  {
7814  m_BlockBytes[heapIndex] = 0;
7815  m_AllocationBytes[heapIndex] = 0;
7816 #if VMA_MEMORY_BUDGET
7817  m_VulkanUsage[heapIndex] = 0;
7818  m_VulkanBudget[heapIndex] = 0;
7819  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7820 #endif
7821  }
7822 
7823 #if VMA_MEMORY_BUDGET
7824  m_OperationsSinceBudgetFetch = 0;
7825 #endif
7826  }
7827 
7828  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7829  {
7830  m_AllocationBytes[heapIndex] += allocationSize;
7831 #if VMA_MEMORY_BUDGET
7832  ++m_OperationsSinceBudgetFetch;
7833 #endif
7834  }
7835 
7836  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7837  {
7838  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
7839  m_AllocationBytes[heapIndex] -= allocationSize;
7840 #if VMA_MEMORY_BUDGET
7841  ++m_OperationsSinceBudgetFetch;
7842 #endif
7843  }
7844 };
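// --- Illustrative sketch (editorial, hypothetical) ---------------------------
// How the budget counters above are typically consulted: between fetches of
// the real VK_EXT_memory_budget numbers, a heap's current usage can be
// estimated from the fetched usage plus the block bytes allocated since that
// fetch. The helper below is a hypothetical standalone model of that idea.
static uint64_t EstimateHeapUsageSketch(
    uint64_t vulkanUsageAtFetch,  // m_VulkanUsage[heapIndex]
    uint64_t blockBytesAtFetch,   // m_BlockBytesAtBudgetFetch[heapIndex]
    uint64_t blockBytesNow)       // m_BlockBytes[heapIndex]
{
    // Blocks allocated since the last budget fetch shift the estimate up;
    // blocks freed since then shift it down (clamped at zero).
    if(blockBytesNow >= blockBytesAtFetch)
        return vulkanUsageAtFetch + (blockBytesNow - blockBytesAtFetch);
    const uint64_t freedSinceFetch = blockBytesAtFetch - blockBytesNow;
    return vulkanUsageAtFetch > freedSinceFetch ? vulkanUsageAtFetch - freedSinceFetch : 0;
}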
7845 
7846 // Main allocator object.
7847 struct VmaAllocator_T
7848 {
7849  VMA_CLASS_NO_COPY(VmaAllocator_T)
7850 public:
7851  bool m_UseMutex;
7852  uint32_t m_VulkanApiVersion;
7853  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7854  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7855  bool m_UseExtMemoryBudget;
7856  bool m_UseAmdDeviceCoherentMemory;
7857  bool m_UseKhrBufferDeviceAddress;
7858  VkDevice m_hDevice;
7859  VkInstance m_hInstance;
7860  bool m_AllocationCallbacksSpecified;
7861  VkAllocationCallbacks m_AllocationCallbacks;
7862  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7863  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7864 
7865  // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so no more than the heap size can be allocated from it.
7866  uint32_t m_HeapSizeLimitMask;
7867 
7868  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7869  VkPhysicalDeviceMemoryProperties m_MemProps;
7870 
7871  // Default pools.
7872  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7873 
7874  // Each vector is sorted by memory (handle value).
7875  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7876  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7877  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7878 
7879  VmaCurrentBudgetData m_Budget;
7880 
7881  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7882  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7883  ~VmaAllocator_T();
7884 
7885  const VkAllocationCallbacks* GetAllocationCallbacks() const
7886  {
7887  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7888  }
7889  const VmaVulkanFunctions& GetVulkanFunctions() const
7890  {
7891  return m_VulkanFunctions;
7892  }
7893 
7894  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7895 
7896  VkDeviceSize GetBufferImageGranularity() const
7897  {
7898  return VMA_MAX(
7899  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7900  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7901  }
7902 
7903  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7904  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7905 
7906  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7907  {
7908  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7909  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7910  }
7911  // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7912  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7913  {
7914  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7915  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7916  }
7917  // Minimum alignment for all allocations in specific memory type.
7918  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7919  {
7920  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7921  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7922  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7923  }
7924 
7925  bool IsIntegratedGpu() const
7926  {
7927  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7928  }
7929 
7930  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7931 
7932 #if VMA_RECORDING_ENABLED
7933  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7934 #endif
7935 
7936  void GetBufferMemoryRequirements(
7937  VkBuffer hBuffer,
7938  VkMemoryRequirements& memReq,
7939  bool& requiresDedicatedAllocation,
7940  bool& prefersDedicatedAllocation) const;
7941  void GetImageMemoryRequirements(
7942  VkImage hImage,
7943  VkMemoryRequirements& memReq,
7944  bool& requiresDedicatedAllocation,
7945  bool& prefersDedicatedAllocation) const;
7946 
7947  // Main allocation function.
7948  VkResult AllocateMemory(
7949  const VkMemoryRequirements& vkMemReq,
7950  bool requiresDedicatedAllocation,
7951  bool prefersDedicatedAllocation,
7952  VkBuffer dedicatedBuffer,
7953  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7954  VkImage dedicatedImage,
7955  const VmaAllocationCreateInfo& createInfo,
7956  VmaSuballocationType suballocType,
7957  size_t allocationCount,
7958  VmaAllocation* pAllocations);
7959 
7960  // Main deallocation function.
7961  void FreeMemory(
7962  size_t allocationCount,
7963  const VmaAllocation* pAllocations);
7964 
7965  VkResult ResizeAllocation(
7966  const VmaAllocation alloc,
7967  VkDeviceSize newSize);
7968 
7969  void CalculateStats(VmaStats* pStats);
7970 
7971  void GetBudget(
7972  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7973 
7974 #if VMA_STATS_STRING_ENABLED
7975  void PrintDetailedMap(class VmaJsonWriter& json);
7976 #endif
7977 
7978  VkResult DefragmentationBegin(
7979  const VmaDefragmentationInfo2& info,
7980  VmaDefragmentationStats* pStats,
7981  VmaDefragmentationContext* pContext);
7982  VkResult DefragmentationEnd(
7983  VmaDefragmentationContext context);
7984 
7985  VkResult DefragmentationPassBegin(
7986  VmaDefragmentationPassInfo* pInfo,
7987  VmaDefragmentationContext context);
7988  VkResult DefragmentationPassEnd(
7989  VmaDefragmentationContext context);
7990 
7991  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7992  bool TouchAllocation(VmaAllocation hAllocation);
7993 
7994  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7995  void DestroyPool(VmaPool pool);
7996  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7997 
7998  void SetCurrentFrameIndex(uint32_t frameIndex);
7999  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
8000 
8001  void MakePoolAllocationsLost(
8002  VmaPool hPool,
8003  size_t* pLostAllocationCount);
8004  VkResult CheckPoolCorruption(VmaPool hPool);
8005  VkResult CheckCorruption(uint32_t memoryTypeBits);
8006 
8007  void CreateLostAllocation(VmaAllocation* pAllocation);
8008 
8009  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
8010  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
8011  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
8012  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
8013  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
8014  VkResult BindVulkanBuffer(
8015  VkDeviceMemory memory,
8016  VkDeviceSize memoryOffset,
8017  VkBuffer buffer,
8018  const void* pNext);
8019  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
8020  VkResult BindVulkanImage(
8021  VkDeviceMemory memory,
8022  VkDeviceSize memoryOffset,
8023  VkImage image,
8024  const void* pNext);
8025 
8026  VkResult Map(VmaAllocation hAllocation, void** ppData);
8027  void Unmap(VmaAllocation hAllocation);
8028 
8029  VkResult BindBufferMemory(
8030  VmaAllocation hAllocation,
8031  VkDeviceSize allocationLocalOffset,
8032  VkBuffer hBuffer,
8033  const void* pNext);
8034  VkResult BindImageMemory(
8035  VmaAllocation hAllocation,
8036  VkDeviceSize allocationLocalOffset,
8037  VkImage hImage,
8038  const void* pNext);
8039 
8040  VkResult FlushOrInvalidateAllocation(
8041  VmaAllocation hAllocation,
8042  VkDeviceSize offset, VkDeviceSize size,
8043  VMA_CACHE_OPERATION op);
8044  VkResult FlushOrInvalidateAllocations(
8045  uint32_t allocationCount,
8046  const VmaAllocation* allocations,
8047  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
8048  VMA_CACHE_OPERATION op);
8049 
8050  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
8051 
8052  /*
8053  Returns a bit mask of the memory types that can support defragmentation on the GPU,
8054  because they allow creation of the buffer required for copy operations.
8055  */
8056  uint32_t GetGpuDefragmentationMemoryTypeBits();
8057 
8058 private:
8059  VkDeviceSize m_PreferredLargeHeapBlockSize;
8060 
8061  VkPhysicalDevice m_PhysicalDevice;
8062  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
8063  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
8064 
8065  VMA_RW_MUTEX m_PoolsMutex;
8066  // Protected by m_PoolsMutex. Sorted by pointer value.
8067  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
8068  uint32_t m_NextPoolId;
8069 
8070  VmaVulkanFunctions m_VulkanFunctions;
8071 
8072  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
8073  uint32_t m_GlobalMemoryTypeBits;
8074 
8075 #if VMA_RECORDING_ENABLED
8076  VmaRecorder* m_pRecorder;
8077 #endif
8078 
8079  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
8080 
8081 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8082  void ImportVulkanFunctions_Static();
8083 #endif
8084 
8085  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
8086 
8087 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
8088  void ImportVulkanFunctions_Dynamic();
8089 #endif
8090 
8091  void ValidateVulkanFunctions();
8092 
8093  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
8094 
8095  VkResult AllocateMemoryOfType(
8096  VkDeviceSize size,
8097  VkDeviceSize alignment,
8098  bool dedicatedAllocation,
8099  VkBuffer dedicatedBuffer,
8100  VkBufferUsageFlags dedicatedBufferUsage,
8101  VkImage dedicatedImage,
8102  const VmaAllocationCreateInfo& createInfo,
8103  uint32_t memTypeIndex,
8104  VmaSuballocationType suballocType,
8105  size_t allocationCount,
8106  VmaAllocation* pAllocations);
8107 
8108  // Helper function only to be used inside AllocateDedicatedMemory.
8109  VkResult AllocateDedicatedMemoryPage(
8110  VkDeviceSize size,
8111  VmaSuballocationType suballocType,
8112  uint32_t memTypeIndex,
8113  const VkMemoryAllocateInfo& allocInfo,
8114  bool map,
8115  bool isUserDataString,
8116  void* pUserData,
8117  VmaAllocation* pAllocation);
8118 
8119  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
8120  VkResult AllocateDedicatedMemory(
8121  VkDeviceSize size,
8122  VmaSuballocationType suballocType,
8123  uint32_t memTypeIndex,
8124  bool withinBudget,
8125  bool map,
8126  bool isUserDataString,
8127  void* pUserData,
8128  VkBuffer dedicatedBuffer,
8129  VkBufferUsageFlags dedicatedBufferUsage,
8130  VkImage dedicatedImage,
8131  size_t allocationCount,
8132  VmaAllocation* pAllocations);
8133 
8134  void FreeDedicatedMemory(const VmaAllocation allocation);
8135 
8136  /*
8137  Calculates and returns a bit mask of the memory types that can support defragmentation
8138  on the GPU, because they allow creation of the buffer required for copy operations.
8139  */
8140  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
8141 
8142  uint32_t CalculateGlobalMemoryTypeBits() const;
8143 
8144  bool GetFlushOrInvalidateRange(
8145  VmaAllocation allocation,
8146  VkDeviceSize offset, VkDeviceSize size,
8147  VkMappedMemoryRange& outRange) const;
8148 
8149 #if VMA_MEMORY_BUDGET
8150  void UpdateVulkanBudget();
8151 #endif // #if VMA_MEMORY_BUDGET
8152 };
8153 
8155 // Memory allocation #2 after VmaAllocator_T definition
8156 
8157 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
8158 {
8159  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
8160 }
8161 
8162 static void VmaFree(VmaAllocator hAllocator, void* ptr)
8163 {
8164  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
8165 }
8166 
8167 template<typename T>
8168 static T* VmaAllocate(VmaAllocator hAllocator)
8169 {
8170  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
8171 }
8172 
8173 template<typename T>
8174 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
8175 {
8176  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
8177 }
8178 
8179 template<typename T>
8180 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8181 {
8182  if(ptr != VMA_NULL)
8183  {
8184  ptr->~T();
8185  VmaFree(hAllocator, ptr);
8186  }
8187 }
8188 
8189 template<typename T>
8190 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
8191 {
8192  if(ptr != VMA_NULL)
8193  {
8194  for(size_t i = count; i--; )
8195  ptr[i].~T();
8196  VmaFree(hAllocator, ptr);
8197  }
8198 }
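// --- Illustrative usage sketch (editorial, hypothetical) ---------------------
// VmaAllocate/VmaAllocateArray only reserve raw, suitably aligned memory; the
// caller is expected to construct objects with placement-new and hand them
// back to vma_delete / vma_delete_array, which run the destructors before
// freeing. The helper name `vma_new_sketch` and type `MyType` are placeholders.
#include <new>
#include <utility>

template<typename MyType, typename... Args>
static MyType* vma_new_sketch(VmaAllocator hAllocator, Args&&... args)
{
    MyType* p = VmaAllocate<MyType>(hAllocator);  // raw storage only
    new(p) MyType(std::forward<Args>(args)...);   // placement-construct
    return p;                                     // later: vma_delete(hAllocator, p);
}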
8199 
8201 // VmaStringBuilder
8202 
8203 #if VMA_STATS_STRING_ENABLED
8204 
8205 class VmaStringBuilder
8206 {
8207 public:
8208  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8209  size_t GetLength() const { return m_Data.size(); }
8210  const char* GetData() const { return m_Data.data(); }
8211 
8212  void Add(char ch) { m_Data.push_back(ch); }
8213  void Add(const char* pStr);
8214  void AddNewLine() { Add('\n'); }
8215  void AddNumber(uint32_t num);
8216  void AddNumber(uint64_t num);
8217  void AddPointer(const void* ptr);
8218 
8219 private:
8220  VmaVector< char, VmaStlAllocator<char> > m_Data;
8221 };
8222 
8223 void VmaStringBuilder::Add(const char* pStr)
8224 {
8225  const size_t strLen = strlen(pStr);
8226  if(strLen > 0)
8227  {
8228  const size_t oldCount = m_Data.size();
8229  m_Data.resize(oldCount + strLen);
8230  memcpy(m_Data.data() + oldCount, pStr, strLen);
8231  }
8232 }
8233 
8234 void VmaStringBuilder::AddNumber(uint32_t num)
8235 {
8236  char buf[11];
8237  buf[10] = '\0';
8238  char *p = &buf[10];
8239  do
8240  {
8241  *--p = '0' + (num % 10);
8242  num /= 10;
8243  }
8244  while(num);
8245  Add(p);
8246 }
8247 
8248 void VmaStringBuilder::AddNumber(uint64_t num)
8249 {
8250  char buf[21];
8251  buf[20] = '\0';
8252  char *p = &buf[20];
8253  do
8254  {
8255  *--p = '0' + (num % 10);
8256  num /= 10;
8257  }
8258  while(num);
8259  Add(p);
8260 }
8261 
8262 void VmaStringBuilder::AddPointer(const void* ptr)
8263 {
8264  char buf[21];
8265  VmaPtrToStr(buf, sizeof(buf), ptr);
8266  Add(buf);
8267 }
8268 
8269 #endif // #if VMA_STATS_STRING_ENABLED
8270 
8272 // VmaJsonWriter
8273 
8274 #if VMA_STATS_STRING_ENABLED
8275 
8276 class VmaJsonWriter
8277 {
8278  VMA_CLASS_NO_COPY(VmaJsonWriter)
8279 public:
8280  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8281  ~VmaJsonWriter();
8282 
8283  void BeginObject(bool singleLine = false);
8284  void EndObject();
8285 
8286  void BeginArray(bool singleLine = false);
8287  void EndArray();
8288 
8289  void WriteString(const char* pStr);
8290  void BeginString(const char* pStr = VMA_NULL);
8291  void ContinueString(const char* pStr);
8292  void ContinueString(uint32_t n);
8293  void ContinueString(uint64_t n);
8294  void ContinueString_Pointer(const void* ptr);
8295  void EndString(const char* pStr = VMA_NULL);
8296 
8297  void WriteNumber(uint32_t n);
8298  void WriteNumber(uint64_t n);
8299  void WriteBool(bool b);
8300  void WriteNull();
8301 
8302 private:
8303  static const char* const INDENT;
8304 
8305  enum COLLECTION_TYPE
8306  {
8307  COLLECTION_TYPE_OBJECT,
8308  COLLECTION_TYPE_ARRAY,
8309  };
8310  struct StackItem
8311  {
8312  COLLECTION_TYPE type;
8313  uint32_t valueCount;
8314  bool singleLineMode;
8315  };
8316 
8317  VmaStringBuilder& m_SB;
8318  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8319  bool m_InsideString;
8320 
8321  void BeginValue(bool isString);
8322  void WriteIndent(bool oneLess = false);
8323 };
8324 
8325 const char* const VmaJsonWriter::INDENT = " ";
8326 
8327 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8328  m_SB(sb),
8329  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8330  m_InsideString(false)
8331 {
8332 }
8333 
8334 VmaJsonWriter::~VmaJsonWriter()
8335 {
8336  VMA_ASSERT(!m_InsideString);
8337  VMA_ASSERT(m_Stack.empty());
8338 }
8339 
8340 void VmaJsonWriter::BeginObject(bool singleLine)
8341 {
8342  VMA_ASSERT(!m_InsideString);
8343 
8344  BeginValue(false);
8345  m_SB.Add('{');
8346 
8347  StackItem item;
8348  item.type = COLLECTION_TYPE_OBJECT;
8349  item.valueCount = 0;
8350  item.singleLineMode = singleLine;
8351  m_Stack.push_back(item);
8352 }
8353 
8354 void VmaJsonWriter::EndObject()
8355 {
8356  VMA_ASSERT(!m_InsideString);
8357 
8358  WriteIndent(true);
8359  m_SB.Add('}');
8360 
8361  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8362  m_Stack.pop_back();
8363 }
8364 
8365 void VmaJsonWriter::BeginArray(bool singleLine)
8366 {
8367  VMA_ASSERT(!m_InsideString);
8368 
8369  BeginValue(false);
8370  m_SB.Add('[');
8371 
8372  StackItem item;
8373  item.type = COLLECTION_TYPE_ARRAY;
8374  item.valueCount = 0;
8375  item.singleLineMode = singleLine;
8376  m_Stack.push_back(item);
8377 }
8378 
8379 void VmaJsonWriter::EndArray()
8380 {
8381  VMA_ASSERT(!m_InsideString);
8382 
8383  WriteIndent(true);
8384  m_SB.Add(']');
8385 
8386  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8387  m_Stack.pop_back();
8388 }
8389 
8390 void VmaJsonWriter::WriteString(const char* pStr)
8391 {
8392  BeginString(pStr);
8393  EndString();
8394 }
8395 
8396 void VmaJsonWriter::BeginString(const char* pStr)
8397 {
8398  VMA_ASSERT(!m_InsideString);
8399 
8400  BeginValue(true);
8401  m_SB.Add('"');
8402  m_InsideString = true;
8403  if(pStr != VMA_NULL && pStr[0] != '\0')
8404  {
8405  ContinueString(pStr);
8406  }
8407 }
8408 
8409 void VmaJsonWriter::ContinueString(const char* pStr)
8410 {
8411  VMA_ASSERT(m_InsideString);
8412 
8413  const size_t strLen = strlen(pStr);
8414  for(size_t i = 0; i < strLen; ++i)
8415  {
8416  char ch = pStr[i];
8417  if(ch == '\\')
8418  {
8419  m_SB.Add("\\\\");
8420  }
8421  else if(ch == '"')
8422  {
8423  m_SB.Add("\\\"");
8424  }
8425  else if(ch >= 32)
8426  {
8427  m_SB.Add(ch);
8428  }
8429  else switch(ch)
8430  {
8431  case '\b':
8432  m_SB.Add("\\b");
8433  break;
8434  case '\f':
8435  m_SB.Add("\\f");
8436  break;
8437  case '\n':
8438  m_SB.Add("\\n");
8439  break;
8440  case '\r':
8441  m_SB.Add("\\r");
8442  break;
8443  case '\t':
8444  m_SB.Add("\\t");
8445  break;
8446  default:
8447  VMA_ASSERT(0 && "Character not currently supported.");
8448  break;
8449  }
8450  }
8451 }
8452 
8453 void VmaJsonWriter::ContinueString(uint32_t n)
8454 {
8455  VMA_ASSERT(m_InsideString);
8456  m_SB.AddNumber(n);
8457 }
8458 
8459 void VmaJsonWriter::ContinueString(uint64_t n)
8460 {
8461  VMA_ASSERT(m_InsideString);
8462  m_SB.AddNumber(n);
8463 }
8464 
8465 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8466 {
8467  VMA_ASSERT(m_InsideString);
8468  m_SB.AddPointer(ptr);
8469 }
8470 
8471 void VmaJsonWriter::EndString(const char* pStr)
8472 {
8473  VMA_ASSERT(m_InsideString);
8474  if(pStr != VMA_NULL && pStr[0] != '\0')
8475  {
8476  ContinueString(pStr);
8477  }
8478  m_SB.Add('"');
8479  m_InsideString = false;
8480 }
8481 
8482 void VmaJsonWriter::WriteNumber(uint32_t n)
8483 {
8484  VMA_ASSERT(!m_InsideString);
8485  BeginValue(false);
8486  m_SB.AddNumber(n);
8487 }
8488 
8489 void VmaJsonWriter::WriteNumber(uint64_t n)
8490 {
8491  VMA_ASSERT(!m_InsideString);
8492  BeginValue(false);
8493  m_SB.AddNumber(n);
8494 }
8495 
8496 void VmaJsonWriter::WriteBool(bool b)
8497 {
8498  VMA_ASSERT(!m_InsideString);
8499  BeginValue(false);
8500  m_SB.Add(b ? "true" : "false");
8501 }
8502 
8503 void VmaJsonWriter::WriteNull()
8504 {
8505  VMA_ASSERT(!m_InsideString);
8506  BeginValue(false);
8507  m_SB.Add("null");
8508 }
8509 
8510 void VmaJsonWriter::BeginValue(bool isString)
8511 {
8512  if(!m_Stack.empty())
8513  {
8514  StackItem& currItem = m_Stack.back();
8515  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8516  currItem.valueCount % 2 == 0)
8517  {
8518  VMA_ASSERT(isString);
8519  }
8520 
8521  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8522  currItem.valueCount % 2 != 0)
8523  {
8524  m_SB.Add(": ");
8525  }
8526  else if(currItem.valueCount > 0)
8527  {
8528  m_SB.Add(", ");
8529  WriteIndent();
8530  }
8531  else
8532  {
8533  WriteIndent();
8534  }
8535  ++currItem.valueCount;
8536  }
8537 }
8538 
8539 void VmaJsonWriter::WriteIndent(bool oneLess)
8540 {
8541  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8542  {
8543  m_SB.AddNewLine();
8544 
8545  size_t count = m_Stack.size();
8546  if(count > 0 && oneLess)
8547  {
8548  --count;
8549  }
8550  for(size_t i = 0; i < count; ++i)
8551  {
8552  m_SB.Add(INDENT);
8553  }
8554  }
8555 }
8556 
8557 #endif // #if VMA_STATS_STRING_ENABLED
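#if VMA_STATS_STRING_ENABLED
// --- Illustrative usage sketch (editorial, hypothetical) ---------------------
// VmaJsonWriter alternates keys and values inside objects (enforced by the
// VMA_ASSERT(isString) in BeginValue above): WriteString supplies a key, and
// the next Write* call supplies its value. A sketch producing roughly
// {"BlockCount": 3, "Blocks": [0, 1, 2]}:
static void WriteJsonSketch(VmaJsonWriter& json)
{
    json.BeginObject();
    json.WriteString("BlockCount"); // key - must be a string
    json.WriteNumber(3u);           // its value
    json.WriteString("Blocks");
    json.BeginArray(true);          // single-line array
    for(uint32_t i = 0; i < 3; ++i)
        json.WriteNumber(i);
    json.EndArray();
    json.EndObject();
}
#endif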
8558 
8560 
8561 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8562 {
8563  if(IsUserDataString())
8564  {
8565  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8566 
8567  FreeUserDataString(hAllocator);
8568 
8569  if(pUserData != VMA_NULL)
8570  {
8571  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8572  }
8573  }
8574  else
8575  {
8576  m_pUserData = pUserData;
8577  }
8578 }
8579 
8580 void VmaAllocation_T::ChangeBlockAllocation(
8581  VmaAllocator hAllocator,
8582  VmaDeviceMemoryBlock* block,
8583  VkDeviceSize offset)
8584 {
8585  VMA_ASSERT(block != VMA_NULL);
8586  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8587 
8588  // Move mapping reference counter from old block to new block.
8589  if(block != m_BlockAllocation.m_Block)
8590  {
8591  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8592  if(IsPersistentMap())
8593  ++mapRefCount;
8594  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8595  block->Map(hAllocator, mapRefCount, VMA_NULL);
8596  }
8597 
8598  m_BlockAllocation.m_Block = block;
8599  m_BlockAllocation.m_Offset = offset;
8600 }
8601 
8602 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8603 {
8604  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8605  m_BlockAllocation.m_Offset = newOffset;
8606 }
8607 
8608 VkDeviceSize VmaAllocation_T::GetOffset() const
8609 {
8610  switch(m_Type)
8611  {
8612  case ALLOCATION_TYPE_BLOCK:
8613  return m_BlockAllocation.m_Offset;
8614  case ALLOCATION_TYPE_DEDICATED:
8615  return 0;
8616  default:
8617  VMA_ASSERT(0);
8618  return 0;
8619  }
8620 }
8621 
8622 VkDeviceMemory VmaAllocation_T::GetMemory() const
8623 {
8624  switch(m_Type)
8625  {
8626  case ALLOCATION_TYPE_BLOCK:
8627  return m_BlockAllocation.m_Block->GetDeviceMemory();
8628  case ALLOCATION_TYPE_DEDICATED:
8629  return m_DedicatedAllocation.m_hMemory;
8630  default:
8631  VMA_ASSERT(0);
8632  return VK_NULL_HANDLE;
8633  }
8634 }
8635 
8636 void* VmaAllocation_T::GetMappedData() const
8637 {
8638  switch(m_Type)
8639  {
8640  case ALLOCATION_TYPE_BLOCK:
8641  if(m_MapCount != 0)
8642  {
8643  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8644  VMA_ASSERT(pBlockData != VMA_NULL);
8645  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8646  }
8647  else
8648  {
8649  return VMA_NULL;
8650  }
8651  break;
8652  case ALLOCATION_TYPE_DEDICATED:
8653  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8654  return m_DedicatedAllocation.m_pMappedData;
8655  default:
8656  VMA_ASSERT(0);
8657  return VMA_NULL;
8658  }
8659 }
8660 
8661 bool VmaAllocation_T::CanBecomeLost() const
8662 {
8663  switch(m_Type)
8664  {
8665  case ALLOCATION_TYPE_BLOCK:
8666  return m_BlockAllocation.m_CanBecomeLost;
8667  case ALLOCATION_TYPE_DEDICATED:
8668  return false;
8669  default:
8670  VMA_ASSERT(0);
8671  return false;
8672  }
8673 }
8674 
8675 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8676 {
8677  VMA_ASSERT(CanBecomeLost());
8678 
8679  /*
8680  Warning: This is a carefully designed algorithm.
8681  Do not modify unless you really know what you're doing :)
8682  */
8683  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8684  for(;;)
8685  {
8686  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8687  {
8688  VMA_ASSERT(0);
8689  return false;
8690  }
8691  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8692  {
8693  return false;
8694  }
8695  else // Last use time earlier than current time.
8696  {
8697  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8698  {
8699  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8700  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8701  return true;
8702  }
8703  }
8704  }
8705 }
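// Worked example for the loop above (illustrative numbers): with
// currentFrameIndex == 1000 and frameInUseCount == 2, an allocation last used
// in frame 997 passes the age test (997 + 2 < 1000), so the compare-exchange
// tries to replace 997 with VMA_FRAME_INDEX_LOST. One last used in frame 998
// is refused (998 + 2 >= 1000), because a command buffer up to two frames
// behind may still reference it. The loop only repeats when another thread
// changed the frame index between the read and the compare-exchange.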
8706 
8707 #if VMA_STATS_STRING_ENABLED
8708 
8709 // Correspond to values of enum VmaSuballocationType.
8710 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8711  "FREE",
8712  "UNKNOWN",
8713  "BUFFER",
8714  "IMAGE_UNKNOWN",
8715  "IMAGE_LINEAR",
8716  "IMAGE_OPTIMAL",
8717 };
8718 
8719 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8720 {
8721  json.WriteString("Type");
8722  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8723 
8724  json.WriteString("Size");
8725  json.WriteNumber(m_Size);
8726 
8727  if(m_pUserData != VMA_NULL)
8728  {
8729  json.WriteString("UserData");
8730  if(IsUserDataString())
8731  {
8732  json.WriteString((const char*)m_pUserData);
8733  }
8734  else
8735  {
8736  json.BeginString();
8737  json.ContinueString_Pointer(m_pUserData);
8738  json.EndString();
8739  }
8740  }
8741 
8742  json.WriteString("CreationFrameIndex");
8743  json.WriteNumber(m_CreationFrameIndex);
8744 
8745  json.WriteString("LastUseFrameIndex");
8746  json.WriteNumber(GetLastUseFrameIndex());
8747 
8748  if(m_BufferImageUsage != 0)
8749  {
8750  json.WriteString("Usage");
8751  json.WriteNumber(m_BufferImageUsage);
8752  }
8753 }
8754 
8755 #endif
8756 
8757 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8758 {
8759  VMA_ASSERT(IsUserDataString());
8760  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8761  m_pUserData = VMA_NULL;
8762 }
8763 
8764 void VmaAllocation_T::BlockAllocMap()
8765 {
8766  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8767 
8768  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8769  {
8770  ++m_MapCount;
8771  }
8772  else
8773  {
8774  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8775  }
8776 }
8777 
8778 void VmaAllocation_T::BlockAllocUnmap()
8779 {
8780  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8781 
8782  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8783  {
8784  --m_MapCount;
8785  }
8786  else
8787  {
8788  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8789  }
8790 }
8791 
8792 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8793 {
8794  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8795 
8796  if(m_MapCount != 0)
8797  {
8798  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8799  {
8800  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8801  *ppData = m_DedicatedAllocation.m_pMappedData;
8802  ++m_MapCount;
8803  return VK_SUCCESS;
8804  }
8805  else
8806  {
8807  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8808  return VK_ERROR_MEMORY_MAP_FAILED;
8809  }
8810  }
8811  else
8812  {
8813  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8814  hAllocator->m_hDevice,
8815  m_DedicatedAllocation.m_hMemory,
8816  0, // offset
8817  VK_WHOLE_SIZE,
8818  0, // flags
8819  ppData);
8820  if(result == VK_SUCCESS)
8821  {
8822  m_DedicatedAllocation.m_pMappedData = *ppData;
8823  m_MapCount = 1;
8824  }
8825  return result;
8826  }
8827 }
8828 
8829 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8830 {
8831  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8832 
8833  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8834  {
8835  --m_MapCount;
8836  if(m_MapCount == 0)
8837  {
8838  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8839  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8840  hAllocator->m_hDevice,
8841  m_DedicatedAllocation.m_hMemory);
8842  }
8843  }
8844  else
8845  {
8846  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8847  }
8848 }
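// Note on the map/unmap functions above: m_MapCount packs a 7-bit reference
// counter (capped at 0x7F) together with MAP_COUNT_FLAG_PERSISTENT_MAP in the
// high bit, which is set for allocations created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT. A persistently mapped allocation can thus
// still be mapped and unmapped explicitly up to 127 more times before the
// counter would overflow and the asserts above fire.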
8849 
8850 #if VMA_STATS_STRING_ENABLED
8851 
8852 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8853 {
8854  json.BeginObject();
8855 
8856  json.WriteString("Blocks");
8857  json.WriteNumber(stat.blockCount);
8858 
8859  json.WriteString("Allocations");
8860  json.WriteNumber(stat.allocationCount);
8861 
8862  json.WriteString("UnusedRanges");
8863  json.WriteNumber(stat.unusedRangeCount);
8864 
8865  json.WriteString("UsedBytes");
8866  json.WriteNumber(stat.usedBytes);
8867 
8868  json.WriteString("UnusedBytes");
8869  json.WriteNumber(stat.unusedBytes);
8870 
8871  if(stat.allocationCount > 1)
8872  {
8873  json.WriteString("AllocationSize");
8874  json.BeginObject(true);
8875  json.WriteString("Min");
8876  json.WriteNumber(stat.allocationSizeMin);
8877  json.WriteString("Avg");
8878  json.WriteNumber(stat.allocationSizeAvg);
8879  json.WriteString("Max");
8880  json.WriteNumber(stat.allocationSizeMax);
8881  json.EndObject();
8882  }
8883 
8884  if(stat.unusedRangeCount > 1)
8885  {
8886  json.WriteString("UnusedRangeSize");
8887  json.BeginObject(true);
8888  json.WriteString("Min");
8889  json.WriteNumber(stat.unusedRangeSizeMin);
8890  json.WriteString("Avg");
8891  json.WriteNumber(stat.unusedRangeSizeAvg);
8892  json.WriteString("Max");
8893  json.WriteNumber(stat.unusedRangeSizeMax);
8894  json.EndObject();
8895  }
8896 
8897  json.EndObject();
8898 }
8899 
8900 #endif // #if VMA_STATS_STRING_ENABLED
8901 
8902 struct VmaSuballocationItemSizeLess
8903 {
8904  bool operator()(
8905  const VmaSuballocationList::iterator lhs,
8906  const VmaSuballocationList::iterator rhs) const
8907  {
8908  return lhs->size < rhs->size;
8909  }
8910  bool operator()(
8911  const VmaSuballocationList::iterator lhs,
8912  VkDeviceSize rhsSize) const
8913  {
8914  return lhs->size < rhsSize;
8915  }
8916 };
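// The second operator() above is deliberately heterogeneous: it compares a
// stored list iterator directly against a plain VkDeviceSize key, which lets
// VmaBinaryFindFirstNotLess() binary-search m_FreeSuballocationsBySize (kept
// sorted by size, ascending) for the first sufficiently large free range
// without materializing a dummy suballocation to compare against.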
8917 
8918 
8919 //////////////////////////////////////////////////////////////////////////////
8920 // class VmaBlockMetadata
8921 
8922 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8923  m_Size(0),
8924  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8925 {
8926 }
8927 
8928 #if VMA_STATS_STRING_ENABLED
8929 
8930 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8931  VkDeviceSize unusedBytes,
8932  size_t allocationCount,
8933  size_t unusedRangeCount) const
8934 {
8935  json.BeginObject();
8936 
8937  json.WriteString("TotalBytes");
8938  json.WriteNumber(GetSize());
8939 
8940  json.WriteString("UnusedBytes");
8941  json.WriteNumber(unusedBytes);
8942 
8943  json.WriteString("Allocations");
8944  json.WriteNumber((uint64_t)allocationCount);
8945 
8946  json.WriteString("UnusedRanges");
8947  json.WriteNumber((uint64_t)unusedRangeCount);
8948 
8949  json.WriteString("Suballocations");
8950  json.BeginArray();
8951 }
8952 
8953 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8954  VkDeviceSize offset,
8955  VmaAllocation hAllocation) const
8956 {
8957  json.BeginObject(true);
8958 
8959  json.WriteString("Offset");
8960  json.WriteNumber(offset);
8961 
8962  hAllocation->PrintParameters(json);
8963 
8964  json.EndObject();
8965 }
8966 
8967 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8968  VkDeviceSize offset,
8969  VkDeviceSize size) const
8970 {
8971  json.BeginObject(true);
8972 
8973  json.WriteString("Offset");
8974  json.WriteNumber(offset);
8975 
8976  json.WriteString("Type");
8977  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8978 
8979  json.WriteString("Size");
8980  json.WriteNumber(size);
8981 
8982  json.EndObject();
8983 }
8984 
8985 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8986 {
8987  json.EndArray();
8988  json.EndObject();
8989 }
8990 
8991 #endif // #if VMA_STATS_STRING_ENABLED
8992 
8993 //////////////////////////////////////////////////////////////////////////////
8994 // class VmaBlockMetadata_Generic
8995 
8996 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8997  VmaBlockMetadata(hAllocator),
8998  m_FreeCount(0),
8999  m_SumFreeSize(0),
9000  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9001  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
9002 {
9003 }
9004 
9005 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
9006 {
9007 }
9008 
9009 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
9010 {
9011  VmaBlockMetadata::Init(size);
9012 
9013  m_FreeCount = 1;
9014  m_SumFreeSize = size;
9015 
9016  VmaSuballocation suballoc = {};
9017  suballoc.offset = 0;
9018  suballoc.size = size;
9019  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9020  suballoc.hAllocation = VK_NULL_HANDLE;
9021 
9022  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9023  m_Suballocations.push_back(suballoc);
9024  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
9025  --suballocItem;
9026  m_FreeSuballocationsBySize.push_back(suballocItem);
9027 }
9028 
9029 bool VmaBlockMetadata_Generic::Validate() const
9030 {
9031  VMA_VALIDATE(!m_Suballocations.empty());
9032 
9033  // Expected offset of new suballocation as calculated from previous ones.
9034  VkDeviceSize calculatedOffset = 0;
9035  // Expected number of free suballocations as calculated from traversing their list.
9036  uint32_t calculatedFreeCount = 0;
9037  // Expected sum size of free suballocations as calculated from traversing their list.
9038  VkDeviceSize calculatedSumFreeSize = 0;
9039  // Expected number of free suballocations that should be registered in
9040  // m_FreeSuballocationsBySize calculated from traversing their list.
9041  size_t freeSuballocationsToRegister = 0;
9042  // True if previous visited suballocation was free.
9043  bool prevFree = false;
9044 
9045  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9046  suballocItem != m_Suballocations.cend();
9047  ++suballocItem)
9048  {
9049  const VmaSuballocation& subAlloc = *suballocItem;
9050 
9051  // Actual offset of this suballocation doesn't match expected one.
9052  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
9053 
9054  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
9055  // Two adjacent free suballocations are invalid. They should be merged.
9056  VMA_VALIDATE(!prevFree || !currFree);
9057 
9058  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
9059 
9060  if(currFree)
9061  {
9062  calculatedSumFreeSize += subAlloc.size;
9063  ++calculatedFreeCount;
9064  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9065  {
9066  ++freeSuballocationsToRegister;
9067  }
9068 
9069  // Margin required between allocations - every free space must be at least that large.
9070  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
9071  }
9072  else
9073  {
9074  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
9075  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
9076 
9077  // Margin required between allocations - previous allocation must be free.
9078  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
9079  }
9080 
9081  calculatedOffset += subAlloc.size;
9082  prevFree = currFree;
9083  }
9084 
9085  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
9086  // match expected one.
9087  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
9088 
9089  VkDeviceSize lastSize = 0;
9090  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
9091  {
9092  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
9093 
9094  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
9095  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9096  // They must be sorted by size ascending.
9097  VMA_VALIDATE(suballocItem->size >= lastSize);
9098 
9099  lastSize = suballocItem->size;
9100  }
9101 
9102  // Check if totals match calculated values.
9103  VMA_VALIDATE(ValidateFreeSuballocationList());
9104  VMA_VALIDATE(calculatedOffset == GetSize());
9105  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
9106  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
9107 
9108  return true;
9109 }
9110 
9111 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
9112 {
9113  if(!m_FreeSuballocationsBySize.empty())
9114  {
9115  return m_FreeSuballocationsBySize.back()->size;
9116  }
9117  else
9118  {
9119  return 0;
9120  }
9121 }
9122 
9123 bool VmaBlockMetadata_Generic::IsEmpty() const
9124 {
9125  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
9126 }
9127 
9128 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9129 {
9130  outInfo.blockCount = 1;
9131 
9132  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9133  outInfo.allocationCount = rangeCount - m_FreeCount;
9134  outInfo.unusedRangeCount = m_FreeCount;
9135 
9136  outInfo.unusedBytes = m_SumFreeSize;
9137  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
9138 
9139  outInfo.allocationSizeMin = UINT64_MAX;
9140  outInfo.allocationSizeMax = 0;
9141  outInfo.unusedRangeSizeMin = UINT64_MAX;
9142  outInfo.unusedRangeSizeMax = 0;
9143 
9144  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9145  suballocItem != m_Suballocations.cend();
9146  ++suballocItem)
9147  {
9148  const VmaSuballocation& suballoc = *suballocItem;
9149  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9150  {
9151  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9152  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9153  }
9154  else
9155  {
9156  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
9157  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
9158  }
9159  }
9160 }
9161 
9162 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
9163 {
9164  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9165 
9166  inoutStats.size += GetSize();
9167  inoutStats.unusedSize += m_SumFreeSize;
9168  inoutStats.allocationCount += rangeCount - m_FreeCount;
9169  inoutStats.unusedRangeCount += m_FreeCount;
9170  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9171 }
9172 
9173 #if VMA_STATS_STRING_ENABLED
9174 
9175 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
9176 {
9177  PrintDetailedMap_Begin(json,
9178  m_SumFreeSize, // unusedBytes
9179  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
9180  m_FreeCount); // unusedRangeCount
9181 
9182  size_t i = 0;
9183  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9184  suballocItem != m_Suballocations.cend();
9185  ++suballocItem, ++i)
9186  {
9187  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9188  {
9189  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
9190  }
9191  else
9192  {
9193  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
9194  }
9195  }
9196 
9197  PrintDetailedMap_End(json);
9198 }
9199 
9200 #endif // #if VMA_STATS_STRING_ENABLED
9201 
9202 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9203  uint32_t currentFrameIndex,
9204  uint32_t frameInUseCount,
9205  VkDeviceSize bufferImageGranularity,
9206  VkDeviceSize allocSize,
9207  VkDeviceSize allocAlignment,
9208  bool upperAddress,
9209  VmaSuballocationType allocType,
9210  bool canMakeOtherLost,
9211  uint32_t strategy,
9212  VmaAllocationRequest* pAllocationRequest)
9213 {
9214  VMA_ASSERT(allocSize > 0);
9215  VMA_ASSERT(!upperAddress);
9216  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9217  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9218  VMA_HEAVY_ASSERT(Validate());
9219 
9220  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9221 
9222  // There is not enough total free space in this block to fulfill the request: Early return.
9223  if(canMakeOtherLost == false &&
9224  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9225  {
9226  return false;
9227  }
9228 
9229  // New algorithm, efficiently searching freeSuballocationsBySize.
9230  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9231  if(freeSuballocCount > 0)
9232  {
9233  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
9234  {
9235  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9236  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9237  m_FreeSuballocationsBySize.data(),
9238  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9239  allocSize + 2 * VMA_DEBUG_MARGIN,
9240  VmaSuballocationItemSizeLess());
9241  size_t index = it - m_FreeSuballocationsBySize.data();
9242  for(; index < freeSuballocCount; ++index)
9243  {
9244  if(CheckAllocation(
9245  currentFrameIndex,
9246  frameInUseCount,
9247  bufferImageGranularity,
9248  allocSize,
9249  allocAlignment,
9250  allocType,
9251  m_FreeSuballocationsBySize[index],
9252  false, // canMakeOtherLost
9253  &pAllocationRequest->offset,
9254  &pAllocationRequest->itemsToMakeLostCount,
9255  &pAllocationRequest->sumFreeSize,
9256  &pAllocationRequest->sumItemSize))
9257  {
9258  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9259  return true;
9260  }
9261  }
9262  }
9263  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9264  {
9265  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9266  it != m_Suballocations.end();
9267  ++it)
9268  {
9269  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9270  currentFrameIndex,
9271  frameInUseCount,
9272  bufferImageGranularity,
9273  allocSize,
9274  allocAlignment,
9275  allocType,
9276  it,
9277  false, // canMakeOtherLost
9278  &pAllocationRequest->offset,
9279  &pAllocationRequest->itemsToMakeLostCount,
9280  &pAllocationRequest->sumFreeSize,
9281  &pAllocationRequest->sumItemSize))
9282  {
9283  pAllocationRequest->item = it;
9284  return true;
9285  }
9286  }
9287  }
9288  else // WORST_FIT, FIRST_FIT
9289  {
9290  // Search starting from the biggest suballocations.
9291  for(size_t index = freeSuballocCount; index--; )
9292  {
9293  if(CheckAllocation(
9294  currentFrameIndex,
9295  frameInUseCount,
9296  bufferImageGranularity,
9297  allocSize,
9298  allocAlignment,
9299  allocType,
9300  m_FreeSuballocationsBySize[index],
9301  false, // canMakeOtherLost
9302  &pAllocationRequest->offset,
9303  &pAllocationRequest->itemsToMakeLostCount,
9304  &pAllocationRequest->sumFreeSize,
9305  &pAllocationRequest->sumItemSize))
9306  {
9307  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9308  return true;
9309  }
9310  }
9311  }
9312  }
9313 
9314  if(canMakeOtherLost)
9315  {
9316  // Brute-force algorithm. TODO: Come up with something better.
9317 
9318  bool found = false;
9319  VmaAllocationRequest tmpAllocRequest = {};
9320  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9321  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9322  suballocIt != m_Suballocations.end();
9323  ++suballocIt)
9324  {
9325  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9326  suballocIt->hAllocation->CanBecomeLost())
9327  {
9328  if(CheckAllocation(
9329  currentFrameIndex,
9330  frameInUseCount,
9331  bufferImageGranularity,
9332  allocSize,
9333  allocAlignment,
9334  allocType,
9335  suballocIt,
9336  canMakeOtherLost,
9337  &tmpAllocRequest.offset,
9338  &tmpAllocRequest.itemsToMakeLostCount,
9339  &tmpAllocRequest.sumFreeSize,
9340  &tmpAllocRequest.sumItemSize))
9341  {
9342  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
9343  {
9344  *pAllocationRequest = tmpAllocRequest;
9345  pAllocationRequest->item = suballocIt;
9346  break;
9347  }
9348  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9349  {
9350  *pAllocationRequest = tmpAllocRequest;
9351  pAllocationRequest->item = suballocIt;
9352  found = true;
9353  }
9354  }
9355  }
9356  }
9357 
9358  return found;
9359  }
9360 
9361  return false;
9362 }
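// Worked example for the best-fit path above (illustrative sizes): with free
// ranges of sizes [64, 128, 256, 512] registered in m_FreeSuballocationsBySize
// and allocSize == 100 (VMA_DEBUG_MARGIN == 0), the binary search lands on the
// 128-byte range first; if CheckAllocation() rejects it, e.g. because of a
// bufferImageGranularity conflict, the loop simply advances to 256 and then
// 512. The WORST_FIT/FIRST_FIT fallback walks the same sorted vector from the
// largest range downward instead.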
9363 
9364 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9365  uint32_t currentFrameIndex,
9366  uint32_t frameInUseCount,
9367  VmaAllocationRequest* pAllocationRequest)
9368 {
9369  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9370 
9371  while(pAllocationRequest->itemsToMakeLostCount > 0)
9372  {
9373  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9374  {
9375  ++pAllocationRequest->item;
9376  }
9377  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9378  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9379  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9380  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9381  {
9382  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9383  --pAllocationRequest->itemsToMakeLostCount;
9384  }
9385  else
9386  {
9387  return false;
9388  }
9389  }
9390 
9391  VMA_HEAVY_ASSERT(Validate());
9392  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9393  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9394 
9395  return true;
9396 }
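// Flow note for the function above: CreateAllocationRequest() only counted the
// candidate victims in itemsToMakeLostCount; this pass actually retires them.
// Every successful MakeLost() is followed by FreeSuballocation(), which may
// merge the freed item with neighbouring free ranges; that is why the iterator
// is re-fetched from its return value rather than advanced manually.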
9397 
9398 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9399 {
9400  uint32_t lostAllocationCount = 0;
9401  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9402  it != m_Suballocations.end();
9403  ++it)
9404  {
9405  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9406  it->hAllocation->CanBecomeLost() &&
9407  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9408  {
9409  it = FreeSuballocation(it);
9410  ++lostAllocationCount;
9411  }
9412  }
9413  return lostAllocationCount;
9414 }
9415 
9416 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9417 {
9418  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9419  it != m_Suballocations.end();
9420  ++it)
9421  {
9422  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
9423  {
9424  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
9425  {
9426  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9427  return VK_ERROR_VALIDATION_FAILED_EXT;
9428  }
9429  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
9430  {
9431  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9432  return VK_ERROR_VALIDATION_FAILED_EXT;
9433  }
9434  }
9435  }
9436 
9437  return VK_SUCCESS;
9438 }
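// Margin sketch for the check above (illustrative, assuming VMA_DEBUG_MARGIN
// is 16): each used range is bracketed as
//
//     [16B magic][ allocation ][16B magic]
//
// where the magic pattern is written when the allocation is created.
// VmaValidateMagicValue() re-reads it at offset - VMA_DEBUG_MARGIN and at
// offset + size; any write that runs past either end of the allocation
// tramples a margin and trips the corresponding assert.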
9439 
9440 void VmaBlockMetadata_Generic::Alloc(
9441  const VmaAllocationRequest& request,
9442  VmaSuballocationType type,
9443  VkDeviceSize allocSize,
9444  VmaAllocation hAllocation)
9445 {
9446  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9447  VMA_ASSERT(request.item != m_Suballocations.end());
9448  VmaSuballocation& suballoc = *request.item;
9449  // Given suballocation is a free block.
9450  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9451  // Given offset is inside this suballocation.
9452  VMA_ASSERT(request.offset >= suballoc.offset);
9453  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9454  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9455  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9456 
9457  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9458  // it to become used.
9459  UnregisterFreeSuballocation(request.item);
9460 
9461  suballoc.offset = request.offset;
9462  suballoc.size = allocSize;
9463  suballoc.type = type;
9464  suballoc.hAllocation = hAllocation;
9465 
9466  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9467  if(paddingEnd)
9468  {
9469  VmaSuballocation paddingSuballoc = {};
9470  paddingSuballoc.offset = request.offset + allocSize;
9471  paddingSuballoc.size = paddingEnd;
9472  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9473  VmaSuballocationList::iterator next = request.item;
9474  ++next;
9475  const VmaSuballocationList::iterator paddingEndItem =
9476  m_Suballocations.insert(next, paddingSuballoc);
9477  RegisterFreeSuballocation(paddingEndItem);
9478  }
9479 
9480  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9481  if(paddingBegin)
9482  {
9483  VmaSuballocation paddingSuballoc = {};
9484  paddingSuballoc.offset = request.offset - paddingBegin;
9485  paddingSuballoc.size = paddingBegin;
9486  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9487  const VmaSuballocationList::iterator paddingBeginItem =
9488  m_Suballocations.insert(request.item, paddingSuballoc);
9489  RegisterFreeSuballocation(paddingBeginItem);
9490  }
9491 
9492  // Update totals.
9493  m_FreeCount = m_FreeCount - 1;
9494  if(paddingBegin > 0)
9495  {
9496  ++m_FreeCount;
9497  }
9498  if(paddingEnd > 0)
9499  {
9500  ++m_FreeCount;
9501  }
9502  m_SumFreeSize -= allocSize;
9503 }
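// Worked example for the split above (illustrative numbers): allocating 64
// bytes at request.offset 136 out of a free suballocation [128, 640) gives
// paddingBegin = 8 and paddingEnd = 512 - 8 - 64 = 440. The item becomes the
// used range [136, 200), a free item [200, 640) is inserted after it and a
// free item [128, 136) before it, so m_FreeCount changes by -1 +1 +1 = +1 and
// m_SumFreeSize drops by exactly allocSize (64).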
9504 
9505 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9506 {
9507  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9508  suballocItem != m_Suballocations.end();
9509  ++suballocItem)
9510  {
9511  VmaSuballocation& suballoc = *suballocItem;
9512  if(suballoc.hAllocation == allocation)
9513  {
9514  FreeSuballocation(suballocItem);
9515  VMA_HEAVY_ASSERT(Validate());
9516  return;
9517  }
9518  }
9519  VMA_ASSERT(0 && "Not found!");
9520 }
9521 
9522 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9523 {
9524  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9525  suballocItem != m_Suballocations.end();
9526  ++suballocItem)
9527  {
9528  VmaSuballocation& suballoc = *suballocItem;
9529  if(suballoc.offset == offset)
9530  {
9531  FreeSuballocation(suballocItem);
9532  return;
9533  }
9534  }
9535  VMA_ASSERT(0 && "Not found!");
9536 }
9537 
9538 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9539 {
9540  VkDeviceSize lastSize = 0;
9541  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9542  {
9543  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9544 
9545  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9546  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9547  VMA_VALIDATE(it->size >= lastSize);
9548  lastSize = it->size;
9549  }
9550  return true;
9551 }
9552 
9553 bool VmaBlockMetadata_Generic::CheckAllocation(
9554  uint32_t currentFrameIndex,
9555  uint32_t frameInUseCount,
9556  VkDeviceSize bufferImageGranularity,
9557  VkDeviceSize allocSize,
9558  VkDeviceSize allocAlignment,
9559  VmaSuballocationType allocType,
9560  VmaSuballocationList::const_iterator suballocItem,
9561  bool canMakeOtherLost,
9562  VkDeviceSize* pOffset,
9563  size_t* itemsToMakeLostCount,
9564  VkDeviceSize* pSumFreeSize,
9565  VkDeviceSize* pSumItemSize) const
9566 {
9567  VMA_ASSERT(allocSize > 0);
9568  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9569  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9570  VMA_ASSERT(pOffset != VMA_NULL);
9571 
9572  *itemsToMakeLostCount = 0;
9573  *pSumFreeSize = 0;
9574  *pSumItemSize = 0;
9575 
9576  if(canMakeOtherLost)
9577  {
9578  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9579  {
9580  *pSumFreeSize = suballocItem->size;
9581  }
9582  else
9583  {
9584  if(suballocItem->hAllocation->CanBecomeLost() &&
9585  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9586  {
9587  ++*itemsToMakeLostCount;
9588  *pSumItemSize = suballocItem->size;
9589  }
9590  else
9591  {
9592  return false;
9593  }
9594  }
9595 
9596  // Remaining size is too small for this request: Early return.
9597  if(GetSize() - suballocItem->offset < allocSize)
9598  {
9599  return false;
9600  }
9601 
9602  // Start from offset equal to beginning of this suballocation.
9603  *pOffset = suballocItem->offset;
9604 
9605  // Apply VMA_DEBUG_MARGIN at the beginning.
9606  if(VMA_DEBUG_MARGIN > 0)
9607  {
9608  *pOffset += VMA_DEBUG_MARGIN;
9609  }
9610 
9611  // Apply alignment.
9612  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9613 
9614  // Check previous suballocations for BufferImageGranularity conflicts.
9615  // Make bigger alignment if necessary.
9616  if(bufferImageGranularity > 1)
9617  {
9618  bool bufferImageGranularityConflict = false;
9619  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9620  while(prevSuballocItem != m_Suballocations.cbegin())
9621  {
9622  --prevSuballocItem;
9623  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9624  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9625  {
9626  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9627  {
9628  bufferImageGranularityConflict = true;
9629  break;
9630  }
9631  }
9632  else
9633  // Already on previous page.
9634  break;
9635  }
9636  if(bufferImageGranularityConflict)
9637  {
9638  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9639  }
9640  }
9641 
9642  // Now that we have final *pOffset, check if we are past suballocItem.
9643  // If yes, return false - this function should be called for another suballocItem as starting point.
9644  if(*pOffset >= suballocItem->offset + suballocItem->size)
9645  {
9646  return false;
9647  }
9648 
9649  // Calculate padding at the beginning based on current offset.
9650  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9651 
9652  // Calculate required margin at the end.
9653  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9654 
9655  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9656  // Another early return check.
9657  if(suballocItem->offset + totalSize > GetSize())
9658  {
9659  return false;
9660  }
9661 
9662  // Advance lastSuballocItem until desired size is reached.
9663  // Update itemsToMakeLostCount.
9664  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9665  if(totalSize > suballocItem->size)
9666  {
9667  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9668  while(remainingSize > 0)
9669  {
9670  ++lastSuballocItem;
9671  if(lastSuballocItem == m_Suballocations.cend())
9672  {
9673  return false;
9674  }
9675  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9676  {
9677  *pSumFreeSize += lastSuballocItem->size;
9678  }
9679  else
9680  {
9681  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9682  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9683  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9684  {
9685  ++*itemsToMakeLostCount;
9686  *pSumItemSize += lastSuballocItem->size;
9687  }
9688  else
9689  {
9690  return false;
9691  }
9692  }
9693  remainingSize = (lastSuballocItem->size < remainingSize) ?
9694  remainingSize - lastSuballocItem->size : 0;
9695  }
9696  }
9697 
9698  // Check next suballocations for BufferImageGranularity conflicts.
9699  // If conflict exists, we must mark more allocations lost or fail.
9700  if(bufferImageGranularity > 1)
9701  {
9702  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9703  ++nextSuballocItem;
9704  while(nextSuballocItem != m_Suballocations.cend())
9705  {
9706  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9707  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9708  {
9709  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9710  {
9711  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9712  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9713  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9714  {
9715  ++*itemsToMakeLostCount;
9716  }
9717  else
9718  {
9719  return false;
9720  }
9721  }
9722  }
9723  else
9724  {
9725  // Already on next page.
9726  break;
9727  }
9728  ++nextSuballocItem;
9729  }
9730  }
9731  }
9732  else
9733  {
9734  const VmaSuballocation& suballoc = *suballocItem;
9735  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9736 
9737  *pSumFreeSize = suballoc.size;
9738 
9739  // Size of this suballocation is too small for this request: Early return.
9740  if(suballoc.size < allocSize)
9741  {
9742  return false;
9743  }
9744 
9745  // Start from offset equal to beginning of this suballocation.
9746  *pOffset = suballoc.offset;
9747 
9748  // Apply VMA_DEBUG_MARGIN at the beginning.
9749  if(VMA_DEBUG_MARGIN > 0)
9750  {
9751  *pOffset += VMA_DEBUG_MARGIN;
9752  }
9753 
9754  // Apply alignment.
9755  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9756 
9757  // Check previous suballocations for BufferImageGranularity conflicts.
9758  // Make bigger alignment if necessary.
9759  if(bufferImageGranularity > 1)
9760  {
9761  bool bufferImageGranularityConflict = false;
9762  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9763  while(prevSuballocItem != m_Suballocations.cbegin())
9764  {
9765  --prevSuballocItem;
9766  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9767  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9768  {
9769  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9770  {
9771  bufferImageGranularityConflict = true;
9772  break;
9773  }
9774  }
9775  else
9776  // Already on previous page.
9777  break;
9778  }
9779  if(bufferImageGranularityConflict)
9780  {
9781  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9782  }
9783  }
9784 
9785  // Calculate padding at the beginning based on current offset.
9786  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9787 
9788  // Calculate required margin at the end.
9789  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9790 
9791  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9792  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9793  {
9794  return false;
9795  }
9796 
9797  // Check next suballocations for BufferImageGranularity conflicts.
9798  // If conflict exists, allocation cannot be made here.
9799  if(bufferImageGranularity > 1)
9800  {
9801  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9802  ++nextSuballocItem;
9803  while(nextSuballocItem != m_Suballocations.cend())
9804  {
9805  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9806  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9807  {
9808  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9809  {
9810  return false;
9811  }
9812  }
9813  else
9814  {
9815  // Already on next page.
9816  break;
9817  }
9818  ++nextSuballocItem;
9819  }
9820  }
9821  }
9822 
9823  // All tests passed: Success. pOffset is already filled.
9824  return true;
9825 }
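// Granularity example for the checks above (illustrative numbers): with
// bufferImageGranularity == 4096, suppose the previous suballocation is an
// optimal-tiling image ending at byte 4999 and the candidate buffer offset is
// *pOffset == 5000. Both fall in the page [4096, 8192), so
// VmaIsBufferImageGranularityConflict() reports a conflict and *pOffset is
// re-aligned to VmaAlignUp(5000, 4096) == 8192 before the size checks run.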
9826 
9827 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9828 {
9829  VMA_ASSERT(item != m_Suballocations.end());
9830  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9831 
9832  VmaSuballocationList::iterator nextItem = item;
9833  ++nextItem;
9834  VMA_ASSERT(nextItem != m_Suballocations.end());
9835  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9836 
9837  item->size += nextItem->size;
9838  --m_FreeCount;
9839  m_Suballocations.erase(nextItem);
9840 }
9841 
9842 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9843 {
9844  // Change this suballocation to be marked as free.
9845  VmaSuballocation& suballoc = *suballocItem;
9846  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9847  suballoc.hAllocation = VK_NULL_HANDLE;
9848 
9849  // Update totals.
9850  ++m_FreeCount;
9851  m_SumFreeSize += suballoc.size;
9852 
9853  // Merge with previous and/or next suballocation if it's also free.
9854  bool mergeWithNext = false;
9855  bool mergeWithPrev = false;
9856 
9857  VmaSuballocationList::iterator nextItem = suballocItem;
9858  ++nextItem;
9859  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9860  {
9861  mergeWithNext = true;
9862  }
9863 
9864  VmaSuballocationList::iterator prevItem = suballocItem;
9865  if(suballocItem != m_Suballocations.begin())
9866  {
9867  --prevItem;
9868  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9869  {
9870  mergeWithPrev = true;
9871  }
9872  }
9873 
9874  if(mergeWithNext)
9875  {
9876  UnregisterFreeSuballocation(nextItem);
9877  MergeFreeWithNext(suballocItem);
9878  }
9879 
9880  if(mergeWithPrev)
9881  {
9882  UnregisterFreeSuballocation(prevItem);
9883  MergeFreeWithNext(prevItem);
9884  RegisterFreeSuballocation(prevItem);
9885  return prevItem;
9886  }
9887  else
9888  {
9889  RegisterFreeSuballocation(suballocItem);
9890  return suballocItem;
9891  }
9892 }
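// Merging sketch for the function above (illustrative sizes): freeing the
// middle item of [free 64][used 128][free 32] first absorbs the right
// neighbour (128 + 32 = 160), then the left neighbour absorbs the result
// (64 + 160 = 224). Both old neighbours are unregistered from
// m_FreeSuballocationsBySize before merging, and the surviving item is
// re-registered once with its final size, so the sorted vector stays consistent.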
9893 
9894 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9895 {
9896  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9897  VMA_ASSERT(item->size > 0);
9898 
9899  // You may want to enable this validation at the beginning or at the end of
9900  // this function, depending on what you want to check.
9901  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9902 
9903  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9904  {
9905  if(m_FreeSuballocationsBySize.empty())
9906  {
9907  m_FreeSuballocationsBySize.push_back(item);
9908  }
9909  else
9910  {
9911  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9912  }
9913  }
9914 
9915  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9916 }
9917 
9918 
9919 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9920 {
9921  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9922  VMA_ASSERT(item->size > 0);
9923 
9924  // You may want to enable this validation at the beginning or at the end of
9925  // this function, depending on what you want to check.
9926  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9927 
9928  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9929  {
9930  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9931  m_FreeSuballocationsBySize.data(),
9932  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9933  item,
9934  VmaSuballocationItemSizeLess());
9935  for(size_t index = it - m_FreeSuballocationsBySize.data();
9936  index < m_FreeSuballocationsBySize.size();
9937  ++index)
9938  {
9939  if(m_FreeSuballocationsBySize[index] == item)
9940  {
9941  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9942  return;
9943  }
9944  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9945  }
9946  VMA_ASSERT(0 && "Not found.");
9947  }
9948 
9949  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9950 }
9951 
9952 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9953  VkDeviceSize bufferImageGranularity,
9954  VmaSuballocationType& inOutPrevSuballocType) const
9955 {
9956  if(bufferImageGranularity == 1 || IsEmpty())
9957  {
9958  return false;
9959  }
9960 
9961  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9962  bool typeConflictFound = false;
9963  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9964  it != m_Suballocations.cend();
9965  ++it)
9966  {
9967  const VmaSuballocationType suballocType = it->type;
9968  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9969  {
9970  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9971  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9972  {
9973  typeConflictFound = true;
9974  }
9975  inOutPrevSuballocType = suballocType;
9976  }
9977  }
9978 
9979  return typeConflictFound || minAlignment >= bufferImageGranularity;
9980 }
9981 
9982 //////////////////////////////////////////////////////////////////////////////
9983 // class VmaBlockMetadata_Linear
9984 
9985 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9986  VmaBlockMetadata(hAllocator),
9987  m_SumFreeSize(0),
9988  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9989  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9990  m_1stVectorIndex(0),
9991  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9992  m_1stNullItemsBeginCount(0),
9993  m_1stNullItemsMiddleCount(0),
9994  m_2ndNullItemsCount(0)
9995 {
9996 }
9997 
9998 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9999 {
10000 }
10001 
10002 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
10003 {
10004  VmaBlockMetadata::Init(size);
10005  m_SumFreeSize = size;
10006 }
10007 
10008 bool VmaBlockMetadata_Linear::Validate() const
10009 {
10010  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10011  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10012 
10013  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
10014  VMA_VALIDATE(!suballocations1st.empty() ||
10015  suballocations2nd.empty() ||
10016  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
10017 
10018  if(!suballocations1st.empty())
10019  {
10020  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
10021  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
10022  // Null item at the end should be just pop_back().
10023  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
10024  }
10025  if(!suballocations2nd.empty())
10026  {
10027  // Null item at the end should be just pop_back().
10028  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
10029  }
10030 
10031  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
10032  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
10033 
10034  VkDeviceSize sumUsedSize = 0;
10035  const size_t suballoc1stCount = suballocations1st.size();
10036  VkDeviceSize offset = VMA_DEBUG_MARGIN;
10037 
10038  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10039  {
10040  const size_t suballoc2ndCount = suballocations2nd.size();
10041  size_t nullItem2ndCount = 0;
10042  for(size_t i = 0; i < suballoc2ndCount; ++i)
10043  {
10044  const VmaSuballocation& suballoc = suballocations2nd[i];
10045  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10046 
10047  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10048  VMA_VALIDATE(suballoc.offset >= offset);
10049 
10050  if(!currFree)
10051  {
10052  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10053  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10054  sumUsedSize += suballoc.size;
10055  }
10056  else
10057  {
10058  ++nullItem2ndCount;
10059  }
10060 
10061  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10062  }
10063 
10064  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10065  }
10066 
10067  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
10068  {
10069  const VmaSuballocation& suballoc = suballocations1st[i];
10070  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
10071  suballoc.hAllocation == VK_NULL_HANDLE);
10072  }
10073 
10074  size_t nullItem1stCount = m_1stNullItemsBeginCount;
10075 
10076  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
10077  {
10078  const VmaSuballocation& suballoc = suballocations1st[i];
10079  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10080 
10081  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10082  VMA_VALIDATE(suballoc.offset >= offset);
10083  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
10084 
10085  if(!currFree)
10086  {
10087  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10088  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10089  sumUsedSize += suballoc.size;
10090  }
10091  else
10092  {
10093  ++nullItem1stCount;
10094  }
10095 
10096  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10097  }
10098  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
10099 
10100  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10101  {
10102  const size_t suballoc2ndCount = suballocations2nd.size();
10103  size_t nullItem2ndCount = 0;
10104  for(size_t i = suballoc2ndCount; i--; )
10105  {
10106  const VmaSuballocation& suballoc = suballocations2nd[i];
10107  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10108 
10109  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10110  VMA_VALIDATE(suballoc.offset >= offset);
10111 
10112  if(!currFree)
10113  {
10114  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10115  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10116  sumUsedSize += suballoc.size;
10117  }
10118  else
10119  {
10120  ++nullItem2ndCount;
10121  }
10122 
10123  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10124  }
10125 
10126  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10127  }
10128 
10129  VMA_VALIDATE(offset <= GetSize());
10130  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
10131 
10132  return true;
10133 }
10134 
10135 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
10136 {
10137  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
10138  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
10139 }
10140 
10141 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
10142 {
10143  const VkDeviceSize size = GetSize();
10144 
10145  /*
10146  We don't consider gaps inside allocation vectors with freed allocations because
10147  they are not suitable for reuse in a linear allocator. We consider only space that
10148  is available for new allocations.
10149  */
10150  if(IsEmpty())
10151  {
10152  return size;
10153  }
10154 
10155  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10156 
10157  switch(m_2ndVectorMode)
10158  {
10159  case SECOND_VECTOR_EMPTY:
10160  /*
10161  Available space is after end of 1st, as well as before beginning of 1st (which
10162  would make it a ring buffer).
10163  */
10164  {
10165  const size_t suballocations1stCount = suballocations1st.size();
10166  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
10167  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10168  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
10169  return VMA_MAX(
10170  firstSuballoc.offset,
10171  size - (lastSuballoc.offset + lastSuballoc.size));
10172  }
10173  break;
10174 
10175  case SECOND_VECTOR_RING_BUFFER:
10176  /*
10177  Available space is only between end of 2nd and beginning of 1st.
10178  */
10179  {
10180  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10181  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
10182  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
10183  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
10184  }
10185  break;
10186 
10187  case SECOND_VECTOR_DOUBLE_STACK:
10188  /*
10189  Available space is only between end of 1st and top of 2nd.
10190  */
10191  {
10192  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10193  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
10194  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
10195  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
10196  }
10197  break;
10198 
10199  default:
10200  VMA_ASSERT(0);
10201  return 0;
10202  }
10203 }
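// Layout sketch for the three modes above (illustrative):
//
//     SECOND_VECTOR_EMPTY:        |....####### 1st #######..............|
//     SECOND_VECTOR_RING_BUFFER:  |## 2nd ##..........####### 1st ######|
//     SECOND_VECTOR_DOUBLE_STACK: |####### 1st #######..........## 2nd ##|
//
// '#' marks allocations and '.' marks the contiguous region each case
// measures: before or after 1st when 2nd is empty, between the end of 2nd and
// the start of 1st in ring-buffer mode, and between the end of 1st and the
// top of 2nd (which grows downward from the end of the block) in double-stack
// mode.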
10204 
10205 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10206 {
10207  const VkDeviceSize size = GetSize();
10208  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10209  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10210  const size_t suballoc1stCount = suballocations1st.size();
10211  const size_t suballoc2ndCount = suballocations2nd.size();
10212 
10213  outInfo.blockCount = 1;
10214  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10215  outInfo.unusedRangeCount = 0;
10216  outInfo.usedBytes = 0;
10217  outInfo.allocationSizeMin = UINT64_MAX;
10218  outInfo.allocationSizeMax = 0;
10219  outInfo.unusedRangeSizeMin = UINT64_MAX;
10220  outInfo.unusedRangeSizeMax = 0;
10221 
10222  VkDeviceSize lastOffset = 0;
10223 
10224  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10225  {
10226  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10227  size_t nextAlloc2ndIndex = 0;
10228  while(lastOffset < freeSpace2ndTo1stEnd)
10229  {
10230  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10231  while(nextAlloc2ndIndex < suballoc2ndCount &&
10232  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10233  {
10234  ++nextAlloc2ndIndex;
10235  }
10236 
10237  // Found non-null allocation.
10238  if(nextAlloc2ndIndex < suballoc2ndCount)
10239  {
10240  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10241 
10242  // 1. Process free space before this allocation.
10243  if(lastOffset < suballoc.offset)
10244  {
10245  // There is free space from lastOffset to suballoc.offset.
10246  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10247  ++outInfo.unusedRangeCount;
10248  outInfo.unusedBytes += unusedRangeSize;
10249  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10250  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10251  }
10252 
10253  // 2. Process this allocation.
10254  // There is allocation with suballoc.offset, suballoc.size.
10255  outInfo.usedBytes += suballoc.size;
10256  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10257  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10258 
10259  // 3. Prepare for next iteration.
10260  lastOffset = suballoc.offset + suballoc.size;
10261  ++nextAlloc2ndIndex;
10262  }
10263  // We are at the end.
10264  else
10265  {
10266  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10267  if(lastOffset < freeSpace2ndTo1stEnd)
10268  {
10269  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10270  ++outInfo.unusedRangeCount;
10271  outInfo.unusedBytes += unusedRangeSize;
10272  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10273  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10274  }
10275 
10276  // End of loop.
10277  lastOffset = freeSpace2ndTo1stEnd;
10278  }
10279  }
10280  }
10281 
10282  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10283  const VkDeviceSize freeSpace1stTo2ndEnd =
10284  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10285  while(lastOffset < freeSpace1stTo2ndEnd)
10286  {
10287  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10288  while(nextAlloc1stIndex < suballoc1stCount &&
10289  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10290  {
10291  ++nextAlloc1stIndex;
10292  }
10293 
10294  // Found non-null allocation.
10295  if(nextAlloc1stIndex < suballoc1stCount)
10296  {
10297  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10298 
10299  // 1. Process free space before this allocation.
10300  if(lastOffset < suballoc.offset)
10301  {
10302  // There is free space from lastOffset to suballoc.offset.
10303  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10304  ++outInfo.unusedRangeCount;
10305  outInfo.unusedBytes += unusedRangeSize;
10306  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10307  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10308  }
10309 
10310  // 2. Process this allocation.
10311  // There is allocation with suballoc.offset, suballoc.size.
10312  outInfo.usedBytes += suballoc.size;
10313  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10314  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10315 
10316  // 3. Prepare for next iteration.
10317  lastOffset = suballoc.offset + suballoc.size;
10318  ++nextAlloc1stIndex;
10319  }
10320  // We are at the end.
10321  else
10322  {
10323  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10324  if(lastOffset < freeSpace1stTo2ndEnd)
10325  {
10326  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10327  ++outInfo.unusedRangeCount;
10328  outInfo.unusedBytes += unusedRangeSize;
10329  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10330  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10331  }
10332 
10333  // End of loop.
10334  lastOffset = freeSpace1stTo2ndEnd;
10335  }
10336  }
10337 
10338  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10339  {
10340  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10341  while(lastOffset < size)
10342  {
10343  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10344  while(nextAlloc2ndIndex != SIZE_MAX &&
10345  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10346  {
10347  --nextAlloc2ndIndex;
10348  }
10349 
10350  // Found non-null allocation.
10351  if(nextAlloc2ndIndex != SIZE_MAX)
10352  {
10353  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10354 
10355  // 1. Process free space before this allocation.
10356  if(lastOffset < suballoc.offset)
10357  {
10358  // There is free space from lastOffset to suballoc.offset.
10359  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10360  ++outInfo.unusedRangeCount;
10361  outInfo.unusedBytes += unusedRangeSize;
10362  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10363  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10364  }
10365 
10366  // 2. Process this allocation.
10367  // There is allocation with suballoc.offset, suballoc.size.
10368  outInfo.usedBytes += suballoc.size;
10369  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10370  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10371 
10372  // 3. Prepare for next iteration.
10373  lastOffset = suballoc.offset + suballoc.size;
10374  --nextAlloc2ndIndex;
10375  }
10376  // We are at the end.
10377  else
10378  {
10379  // There is free space from lastOffset to size.
10380  if(lastOffset < size)
10381  {
10382  const VkDeviceSize unusedRangeSize = size - lastOffset;
10383  ++outInfo.unusedRangeCount;
10384  outInfo.unusedBytes += unusedRangeSize;
10385  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10386  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10387  }
10388 
10389  // End of loop.
10390  lastOffset = size;
10391  }
10392  }
10393  }
10394 
10395  outInfo.unusedBytes = size - outInfo.usedBytes;
10396 }
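// Walk-order note for the function above: statistics are accumulated strictly
// in address order; first the ring-buffer part of the 2nd vector (which lives
// below the 1st), then the 1st vector, then the double-stack part of the 2nd
// vector (which lives above it). Advancing lastOffset past each live
// allocation guarantees every gap is counted exactly once as an unused range.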
10397 
10398 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10399 {
10400  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10401  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10402  const VkDeviceSize size = GetSize();
10403  const size_t suballoc1stCount = suballocations1st.size();
10404  const size_t suballoc2ndCount = suballocations2nd.size();
10405 
10406  inoutStats.size += size;
10407 
10408  VkDeviceSize lastOffset = 0;
10409 
10410  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10411  {
10412  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10413  size_t nextAlloc2ndIndex = 0;
10414  while(lastOffset < freeSpace2ndTo1stEnd)
10415  {
10416  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10417  while(nextAlloc2ndIndex < suballoc2ndCount &&
10418  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10419  {
10420  ++nextAlloc2ndIndex;
10421  }
10422 
10423  // Found non-null allocation.
10424  if(nextAlloc2ndIndex < suballoc2ndCount)
10425  {
10426  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10427 
10428  // 1. Process free space before this allocation.
10429  if(lastOffset < suballoc.offset)
10430  {
10431  // There is free space from lastOffset to suballoc.offset.
10432  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10433  inoutStats.unusedSize += unusedRangeSize;
10434  ++inoutStats.unusedRangeCount;
10435  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10436  }
10437 
10438  // 2. Process this allocation.
10439  // There is allocation with suballoc.offset, suballoc.size.
10440  ++inoutStats.allocationCount;
10441 
10442  // 3. Prepare for next iteration.
10443  lastOffset = suballoc.offset + suballoc.size;
10444  ++nextAlloc2ndIndex;
10445  }
10446  // We are at the end.
10447  else
10448  {
10449  if(lastOffset < freeSpace2ndTo1stEnd)
10450  {
10451  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10452  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10453  inoutStats.unusedSize += unusedRangeSize;
10454  ++inoutStats.unusedRangeCount;
10455  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10456  }
10457 
10458  // End of loop.
10459  lastOffset = freeSpace2ndTo1stEnd;
10460  }
10461  }
10462  }
10463 
10464  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10465  const VkDeviceSize freeSpace1stTo2ndEnd =
10466  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10467  while(lastOffset < freeSpace1stTo2ndEnd)
10468  {
10469  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10470  while(nextAlloc1stIndex < suballoc1stCount &&
10471  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10472  {
10473  ++nextAlloc1stIndex;
10474  }
10475 
10476  // Found non-null allocation.
10477  if(nextAlloc1stIndex < suballoc1stCount)
10478  {
10479  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10480 
10481  // 1. Process free space before this allocation.
10482  if(lastOffset < suballoc.offset)
10483  {
10484  // There is free space from lastOffset to suballoc.offset.
10485  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10486  inoutStats.unusedSize += unusedRangeSize;
10487  ++inoutStats.unusedRangeCount;
10488  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10489  }
10490 
10491  // 2. Process this allocation.
10492  // There is allocation with suballoc.offset, suballoc.size.
10493  ++inoutStats.allocationCount;
10494 
10495  // 3. Prepare for next iteration.
10496  lastOffset = suballoc.offset + suballoc.size;
10497  ++nextAlloc1stIndex;
10498  }
10499  // We are at the end.
10500  else
10501  {
10502  if(lastOffset < freeSpace1stTo2ndEnd)
10503  {
10504  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10505  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10506  inoutStats.unusedSize += unusedRangeSize;
10507  ++inoutStats.unusedRangeCount;
10508  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10509  }
10510 
10511  // End of loop.
10512  lastOffset = freeSpace1stTo2ndEnd;
10513  }
10514  }
10515 
10516  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10517  {
10518  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10519  while(lastOffset < size)
10520  {
10521  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10522  while(nextAlloc2ndIndex != SIZE_MAX &&
10523  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10524  {
10525  --nextAlloc2ndIndex;
10526  }
10527 
10528  // Found non-null allocation.
10529  if(nextAlloc2ndIndex != SIZE_MAX)
10530  {
10531  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10532 
10533  // 1. Process free space before this allocation.
10534  if(lastOffset < suballoc.offset)
10535  {
10536  // There is free space from lastOffset to suballoc.offset.
10537  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10538  inoutStats.unusedSize += unusedRangeSize;
10539  ++inoutStats.unusedRangeCount;
10540  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10541  }
10542 
10543  // 2. Process this allocation.
10544  // There is allocation with suballoc.offset, suballoc.size.
10545  ++inoutStats.allocationCount;
10546 
10547  // 3. Prepare for next iteration.
10548  lastOffset = suballoc.offset + suballoc.size;
10549  --nextAlloc2ndIndex;
10550  }
10551  // We are at the end.
10552  else
10553  {
10554  if(lastOffset < size)
10555  {
10556  // There is free space from lastOffset to size.
10557  const VkDeviceSize unusedRangeSize = size - lastOffset;
10558  inoutStats.unusedSize += unusedRangeSize;
10559  ++inoutStats.unusedRangeCount;
10560  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10561  }
10562 
10563  // End of loop.
10564  lastOffset = size;
10565  }
10566  }
10567  }
10568 }
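/*
AddPoolStats (like the other statistics functions above) walks the block in
strictly increasing address order, in up to three phases selected by
m_2ndVectorMode. Schematic outline (a sketch, not library code):

    1. SECOND_VECTOR_RING_BUFFER only: 2nd vector, ascending, covering
       [0, suballocations1st[m_1stNullItemsBeginCount].offset).
    2. Always: 1st vector, ascending, up to suballocations2nd.back().offset
       in double-stack mode, otherwise up to GetSize().
    3. SECOND_VECTOR_DOUBLE_STACK only: 2nd vector, iterated from back() to
       front(), which is ascending offset order, up to GetSize().

Null (freed) items inside either vector are skipped; the gaps they leave
behind are counted as unused ranges.
*/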
10569 
10570 #if VMA_STATS_STRING_ENABLED
10571 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10572 {
10573  const VkDeviceSize size = GetSize();
10574  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10575  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10576  const size_t suballoc1stCount = suballocations1st.size();
10577  const size_t suballoc2ndCount = suballocations2nd.size();
10578 
10579  // FIRST PASS
10580 
10581  size_t unusedRangeCount = 0;
10582  VkDeviceSize usedBytes = 0;
10583 
10584  VkDeviceSize lastOffset = 0;
10585 
10586  size_t alloc2ndCount = 0;
10587  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10588  {
10589  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10590  size_t nextAlloc2ndIndex = 0;
10591  while(lastOffset < freeSpace2ndTo1stEnd)
10592  {
10593  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10594  while(nextAlloc2ndIndex < suballoc2ndCount &&
10595  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10596  {
10597  ++nextAlloc2ndIndex;
10598  }
10599 
10600  // Found non-null allocation.
10601  if(nextAlloc2ndIndex < suballoc2ndCount)
10602  {
10603  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10604 
10605  // 1. Process free space before this allocation.
10606  if(lastOffset < suballoc.offset)
10607  {
10608  // There is free space from lastOffset to suballoc.offset.
10609  ++unusedRangeCount;
10610  }
10611 
10612  // 2. Process this allocation.
10613  // There is allocation with suballoc.offset, suballoc.size.
10614  ++alloc2ndCount;
10615  usedBytes += suballoc.size;
10616 
10617  // 3. Prepare for next iteration.
10618  lastOffset = suballoc.offset + suballoc.size;
10619  ++nextAlloc2ndIndex;
10620  }
10621  // We are at the end.
10622  else
10623  {
10624  if(lastOffset < freeSpace2ndTo1stEnd)
10625  {
10626  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10627  ++unusedRangeCount;
10628  }
10629 
10630  // End of loop.
10631  lastOffset = freeSpace2ndTo1stEnd;
10632  }
10633  }
10634  }
10635 
10636  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10637  size_t alloc1stCount = 0;
10638  const VkDeviceSize freeSpace1stTo2ndEnd =
10639  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10640  while(lastOffset < freeSpace1stTo2ndEnd)
10641  {
10642  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10643  while(nextAlloc1stIndex < suballoc1stCount &&
10644  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10645  {
10646  ++nextAlloc1stIndex;
10647  }
10648 
10649  // Found non-null allocation.
10650  if(nextAlloc1stIndex < suballoc1stCount)
10651  {
10652  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10653 
10654  // 1. Process free space before this allocation.
10655  if(lastOffset < suballoc.offset)
10656  {
10657  // There is free space from lastOffset to suballoc.offset.
10658  ++unusedRangeCount;
10659  }
10660 
10661  // 2. Process this allocation.
10662  // There is allocation with suballoc.offset, suballoc.size.
10663  ++alloc1stCount;
10664  usedBytes += suballoc.size;
10665 
10666  // 3. Prepare for next iteration.
10667  lastOffset = suballoc.offset + suballoc.size;
10668  ++nextAlloc1stIndex;
10669  }
10670  // We are at the end.
10671  else
10672  {
10673  if(lastOffset < freeSpace1stTo2ndEnd)
10674  {
10675  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10676  ++unusedRangeCount;
10677  }
10678 
10679  // End of loop.
10680  lastOffset = freeSpace1stTo2ndEnd;
10681  }
10682  }
10683 
10684  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10685  {
10686  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10687  while(lastOffset < size)
10688  {
10689  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10690  while(nextAlloc2ndIndex != SIZE_MAX &&
10691  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10692  {
10693  --nextAlloc2ndIndex;
10694  }
10695 
10696  // Found non-null allocation.
10697  if(nextAlloc2ndIndex != SIZE_MAX)
10698  {
10699  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10700 
10701  // 1. Process free space before this allocation.
10702  if(lastOffset < suballoc.offset)
10703  {
10704  // There is free space from lastOffset to suballoc.offset.
10705  ++unusedRangeCount;
10706  }
10707 
10708  // 2. Process this allocation.
10709  // There is allocation with suballoc.offset, suballoc.size.
10710  ++alloc2ndCount;
10711  usedBytes += suballoc.size;
10712 
10713  // 3. Prepare for next iteration.
10714  lastOffset = suballoc.offset + suballoc.size;
10715  --nextAlloc2ndIndex;
10716  }
10717  // We are at the end.
10718  else
10719  {
10720  if(lastOffset < size)
10721  {
10722  // There is free space from lastOffset to size.
10723  ++unusedRangeCount;
10724  }
10725 
10726  // End of loop.
10727  lastOffset = size;
10728  }
10729  }
10730  }
10731 
10732  const VkDeviceSize unusedBytes = size - usedBytes;
10733  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10734 
10735  // SECOND PASS
10736  lastOffset = 0;
10737 
10738  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10739  {
10740  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10741  size_t nextAlloc2ndIndex = 0;
10742  while(lastOffset < freeSpace2ndTo1stEnd)
10743  {
10744  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10745  while(nextAlloc2ndIndex < suballoc2ndCount &&
10746  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10747  {
10748  ++nextAlloc2ndIndex;
10749  }
10750 
10751  // Found non-null allocation.
10752  if(nextAlloc2ndIndex < suballoc2ndCount)
10753  {
10754  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10755 
10756  // 1. Process free space before this allocation.
10757  if(lastOffset < suballoc.offset)
10758  {
10759  // There is free space from lastOffset to suballoc.offset.
10760  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10761  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10762  }
10763 
10764  // 2. Process this allocation.
10765  // There is allocation with suballoc.offset, suballoc.size.
10766  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10767 
10768  // 3. Prepare for next iteration.
10769  lastOffset = suballoc.offset + suballoc.size;
10770  ++nextAlloc2ndIndex;
10771  }
10772  // We are at the end.
10773  else
10774  {
10775  if(lastOffset < freeSpace2ndTo1stEnd)
10776  {
10777  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10778  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10779  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10780  }
10781 
10782  // End of loop.
10783  lastOffset = freeSpace2ndTo1stEnd;
10784  }
10785  }
10786  }
10787 
10788  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10789  while(lastOffset < freeSpace1stTo2ndEnd)
10790  {
10791  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10792  while(nextAlloc1stIndex < suballoc1stCount &&
10793  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10794  {
10795  ++nextAlloc1stIndex;
10796  }
10797 
10798  // Found non-null allocation.
10799  if(nextAlloc1stIndex < suballoc1stCount)
10800  {
10801  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10802 
10803  // 1. Process free space before this allocation.
10804  if(lastOffset < suballoc.offset)
10805  {
10806  // There is free space from lastOffset to suballoc.offset.
10807  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10808  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10809  }
10810 
10811  // 2. Process this allocation.
10812  // There is allocation with suballoc.offset, suballoc.size.
10813  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10814 
10815  // 3. Prepare for next iteration.
10816  lastOffset = suballoc.offset + suballoc.size;
10817  ++nextAlloc1stIndex;
10818  }
10819  // We are at the end.
10820  else
10821  {
10822  if(lastOffset < freeSpace1stTo2ndEnd)
10823  {
10824  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10825  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10826  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10827  }
10828 
10829  // End of loop.
10830  lastOffset = freeSpace1stTo2ndEnd;
10831  }
10832  }
10833 
10834  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10835  {
10836  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10837  while(lastOffset < size)
10838  {
10839  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10840  while(nextAlloc2ndIndex != SIZE_MAX &&
10841  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10842  {
10843  --nextAlloc2ndIndex;
10844  }
10845 
10846  // Found non-null allocation.
10847  if(nextAlloc2ndIndex != SIZE_MAX)
10848  {
10849  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10850 
10851  // 1. Process free space before this allocation.
10852  if(lastOffset < suballoc.offset)
10853  {
10854  // There is free space from lastOffset to suballoc.offset.
10855  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10856  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10857  }
10858 
10859  // 2. Process this allocation.
10860  // There is allocation with suballoc.offset, suballoc.size.
10861  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10862 
10863  // 3. Prepare for next iteration.
10864  lastOffset = suballoc.offset + suballoc.size;
10865  --nextAlloc2ndIndex;
10866  }
10867  // We are at the end.
10868  else
10869  {
10870  if(lastOffset < size)
10871  {
10872  // There is free space from lastOffset to size.
10873  const VkDeviceSize unusedRangeSize = size - lastOffset;
10874  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10875  }
10876 
10877  // End of loop.
10878  lastOffset = size;
10879  }
10880  }
10881  }
10882 
10883  PrintDetailedMap_End(json);
10884 }
10885 #endif // #if VMA_STATS_STRING_ENABLED
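/*
PrintDetailedMap needs two passes because PrintDetailedMap_Begin writes the
summary totals (unused bytes, allocation count, unused range count) into the
JSON object before any per-item entry can be emitted. The first pass only
counts; the second pass replays the same traversal and emits one entry per
allocation or unused range. Rough shape of the result (field names
illustrative, not the exact output):

    { "UnusedBytes": ..., "Allocations": [ ... ], "UnusedRanges": [ ... ] }
*/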
10886 
10887 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10888  uint32_t currentFrameIndex,
10889  uint32_t frameInUseCount,
10890  VkDeviceSize bufferImageGranularity,
10891  VkDeviceSize allocSize,
10892  VkDeviceSize allocAlignment,
10893  bool upperAddress,
10894  VmaSuballocationType allocType,
10895  bool canMakeOtherLost,
10896  uint32_t strategy,
10897  VmaAllocationRequest* pAllocationRequest)
10898 {
10899  VMA_ASSERT(allocSize > 0);
10900  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10901  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10902  VMA_HEAVY_ASSERT(Validate());
10903  return upperAddress ?
10904  CreateAllocationRequest_UpperAddress(
10905  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10906  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10907  CreateAllocationRequest_LowerAddress(
10908  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10909  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10910 }
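/*
The upperAddress flag originates in the public API: an allocation created
with VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT from a custom pool that uses
VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT is routed to
CreateAllocationRequest_UpperAddress, turning the pool into a double stack.
Illustrative usage sketch (pool and buffer setup omitted; myLinearPool is
assumed to exist):

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.pool = myLinearPool;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
    // vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    //     &buf, &alloc, VMA_NULL);
*/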
10911 
10912 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10913  uint32_t currentFrameIndex,
10914  uint32_t frameInUseCount,
10915  VkDeviceSize bufferImageGranularity,
10916  VkDeviceSize allocSize,
10917  VkDeviceSize allocAlignment,
10918  VmaSuballocationType allocType,
10919  bool canMakeOtherLost,
10920  uint32_t strategy,
10921  VmaAllocationRequest* pAllocationRequest)
10922 {
10923  const VkDeviceSize size = GetSize();
10924  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10925  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10926 
10927  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10928  {
10929  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10930  return false;
10931  }
10932 
10933  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10934  if(allocSize > size)
10935  {
10936  return false;
10937  }
10938  VkDeviceSize resultBaseOffset = size - allocSize;
10939  if(!suballocations2nd.empty())
10940  {
10941  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10942  resultBaseOffset = lastSuballoc.offset - allocSize;
10943  if(allocSize > lastSuballoc.offset)
10944  {
10945  return false;
10946  }
10947  }
10948 
10949  // Start from offset equal to end of free space.
10950  VkDeviceSize resultOffset = resultBaseOffset;
10951 
10952  // Apply VMA_DEBUG_MARGIN at the end.
10953  if(VMA_DEBUG_MARGIN > 0)
10954  {
10955  if(resultOffset < VMA_DEBUG_MARGIN)
10956  {
10957  return false;
10958  }
10959  resultOffset -= VMA_DEBUG_MARGIN;
10960  }
10961 
10962  // Apply alignment.
10963  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10964 
10965  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10966  // Make bigger alignment if necessary.
10967  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10968  {
10969  bool bufferImageGranularityConflict = false;
10970  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10971  {
10972  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10973  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10974  {
10975  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10976  {
10977  bufferImageGranularityConflict = true;
10978  break;
10979  }
10980  }
10981  else
10982  // Already on previous page.
10983  break;
10984  }
10985  if(bufferImageGranularityConflict)
10986  {
10987  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10988  }
10989  }
10990 
10991  // There is enough free space.
10992  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10993  suballocations1st.back().offset + suballocations1st.back().size :
10994  0;
10995  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10996  {
10997  // Check previous suballocations for BufferImageGranularity conflicts.
10998  // If conflict exists, allocation cannot be made here.
10999  if(bufferImageGranularity > 1)
11000  {
11001  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11002  {
11003  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11004  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11005  {
11006  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
11007  {
11008  return false;
11009  }
11010  }
11011  else
11012  {
11013  // Already on next page.
11014  break;
11015  }
11016  }
11017  }
11018 
11019  // All tests passed: Success.
11020  pAllocationRequest->offset = resultOffset;
11021  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
11022  pAllocationRequest->sumItemSize = 0;
11023  // pAllocationRequest->item unused.
11024  pAllocationRequest->itemsToMakeLostCount = 0;
11025  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
11026  return true;
11027  }
11028 
11029  return false;
11030 }
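/*
Worked example of the top-down placement above (all numbers hypothetical):
block size 1024, allocSize 100, allocAlignment 64, VMA_DEBUG_MARGIN 0,
empty 2nd vector.

    resultBaseOffset = 1024 - 100 = 924
    resultOffset     = VmaAlignDown(924, 64) = 896

The allocation would occupy [896, 996) and succeeds as long as
endOf1st <= 896, i.e. the 1st vector has not grown past that offset.
*/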
11031 
11032 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
11033  uint32_t currentFrameIndex,
11034  uint32_t frameInUseCount,
11035  VkDeviceSize bufferImageGranularity,
11036  VkDeviceSize allocSize,
11037  VkDeviceSize allocAlignment,
11038  VmaSuballocationType allocType,
11039  bool canMakeOtherLost,
11040  uint32_t strategy,
11041  VmaAllocationRequest* pAllocationRequest)
11042 {
11043  const VkDeviceSize size = GetSize();
11044  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11045  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11046 
11047  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11048  {
11049  // Try to allocate at the end of 1st vector.
11050 
11051  VkDeviceSize resultBaseOffset = 0;
11052  if(!suballocations1st.empty())
11053  {
11054  const VmaSuballocation& lastSuballoc = suballocations1st.back();
11055  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11056  }
11057 
11058  // Start from offset equal to beginning of free space.
11059  VkDeviceSize resultOffset = resultBaseOffset;
11060 
11061  // Apply VMA_DEBUG_MARGIN at the beginning.
11062  if(VMA_DEBUG_MARGIN > 0)
11063  {
11064  resultOffset += VMA_DEBUG_MARGIN;
11065  }
11066 
11067  // Apply alignment.
11068  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11069 
11070  // Check previous suballocations for BufferImageGranularity conflicts.
11071  // Make bigger alignment if necessary.
11072  if(bufferImageGranularity > 1 && !suballocations1st.empty())
11073  {
11074  bool bufferImageGranularityConflict = false;
11075  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11076  {
11077  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11078  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11079  {
11080  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11081  {
11082  bufferImageGranularityConflict = true;
11083  break;
11084  }
11085  }
11086  else
11087  // Already on previous page.
11088  break;
11089  }
11090  if(bufferImageGranularityConflict)
11091  {
11092  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11093  }
11094  }
11095 
11096  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
11097  suballocations2nd.back().offset : size;
11098 
11099  // There is enough free space at the end after alignment.
11100  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
11101  {
11102  // Check next suballocations for BufferImageGranularity conflicts.
11103  // If conflict exists, allocation cannot be made here.
11104  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11105  {
11106  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11107  {
11108  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11109  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11110  {
11111  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11112  {
11113  return false;
11114  }
11115  }
11116  else
11117  {
11118  // Already on previous page.
11119  break;
11120  }
11121  }
11122  }
11123 
11124  // All tests passed: Success.
11125  pAllocationRequest->offset = resultOffset;
11126  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
11127  pAllocationRequest->sumItemSize = 0;
11128  // pAllocationRequest->item, customData unused.
11129  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
11130  pAllocationRequest->itemsToMakeLostCount = 0;
11131  return true;
11132  }
11133  }
11134 
11135  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
11136  // beginning of 1st vector as the end of free space.
11137  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11138  {
11139  VMA_ASSERT(!suballocations1st.empty());
11140 
11141  VkDeviceSize resultBaseOffset = 0;
11142  if(!suballocations2nd.empty())
11143  {
11144  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11145  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11146  }
11147 
11148  // Start from offset equal to beginning of free space.
11149  VkDeviceSize resultOffset = resultBaseOffset;
11150 
11151  // Apply VMA_DEBUG_MARGIN at the beginning.
11152  if(VMA_DEBUG_MARGIN > 0)
11153  {
11154  resultOffset += VMA_DEBUG_MARGIN;
11155  }
11156 
11157  // Apply alignment.
11158  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11159 
11160  // Check previous suballocations for BufferImageGranularity conflicts.
11161  // Make bigger alignment if necessary.
11162  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
11163  {
11164  bool bufferImageGranularityConflict = false;
11165  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
11166  {
11167  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
11168  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11169  {
11170  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11171  {
11172  bufferImageGranularityConflict = true;
11173  break;
11174  }
11175  }
11176  else
11177  // Already on previous page.
11178  break;
11179  }
11180  if(bufferImageGranularityConflict)
11181  {
11182  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11183  }
11184  }
11185 
11186  pAllocationRequest->itemsToMakeLostCount = 0;
11187  pAllocationRequest->sumItemSize = 0;
11188  size_t index1st = m_1stNullItemsBeginCount;
11189 
11190  if(canMakeOtherLost)
11191  {
11192  while(index1st < suballocations1st.size() &&
11193  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
11194  {
11195  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
11196  const VmaSuballocation& suballoc = suballocations1st[index1st];
11197  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11198  {
11199  // No problem.
11200  }
11201  else
11202  {
11203  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11204  if(suballoc.hAllocation->CanBecomeLost() &&
11205  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11206  {
11207  ++pAllocationRequest->itemsToMakeLostCount;
11208  pAllocationRequest->sumItemSize += suballoc.size;
11209  }
11210  else
11211  {
11212  return false;
11213  }
11214  }
11215  ++index1st;
11216  }
11217 
11218  // Check next suballocations for BufferImageGranularity conflicts.
11219  // If conflict exists, we must mark more allocations lost or fail.
11220  if(bufferImageGranularity > 1)
11221  {
11222  while(index1st < suballocations1st.size())
11223  {
11224  const VmaSuballocation& suballoc = suballocations1st[index1st];
11225  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11226  {
11227  if(suballoc.hAllocation != VK_NULL_HANDLE)
11228  {
11229  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11230  if(suballoc.hAllocation->CanBecomeLost() &&
11231  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11232  {
11233  ++pAllocationRequest->itemsToMakeLostCount;
11234  pAllocationRequest->sumItemSize += suballoc.size;
11235  }
11236  else
11237  {
11238  return false;
11239  }
11240  }
11241  }
11242  else
11243  {
11244  // Already on next page.
11245  break;
11246  }
11247  ++index1st;
11248  }
11249  }
11250 
11251  // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
11252  if(index1st == suballocations1st.size() &&
11253  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11254  {
11255  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
11256  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11257  }
11258  }
11259 
11260  // There is enough free space at the end after alignment.
11261  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11262  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11263  {
11264  // Check next suballocations for BufferImageGranularity conflicts.
11265  // If conflict exists, allocation cannot be made here.
11266  if(bufferImageGranularity > 1)
11267  {
11268  for(size_t nextSuballocIndex = index1st;
11269  nextSuballocIndex < suballocations1st.size();
11270  nextSuballocIndex++)
11271  {
11272  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11273  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11274  {
11275  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11276  {
11277  return false;
11278  }
11279  }
11280  else
11281  {
11282  // Already on next page.
11283  break;
11284  }
11285  }
11286  }
11287 
11288  // All tests passed: Success.
11289  pAllocationRequest->offset = resultOffset;
11290  pAllocationRequest->sumFreeSize =
11291  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11292  - resultBaseOffset
11293  - pAllocationRequest->sumItemSize;
11294  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11295  // pAllocationRequest->item, customData unused.
11296  return true;
11297  }
11298  }
11299 
11300  return false;
11301 }
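/*
Schematic of the wrap-around case handled above (a sketch, not to scale):
when the tail of the block is full, new allocations start a 2nd vector at
offset 0 and grow upward toward the first live item of the 1st vector.

    | 2nd vector -->      free      | live tail of 1st vector |
    0                               ^                    GetSize()
                                    suballocations1st[index1st].offset

Free space for such a request therefore ends at the first surviving 1st
item; with canMakeOtherLost, that boundary can be pushed further by making
old enough allocations lost.
*/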
11302 
11303 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11304  uint32_t currentFrameIndex,
11305  uint32_t frameInUseCount,
11306  VmaAllocationRequest* pAllocationRequest)
11307 {
11308  if(pAllocationRequest->itemsToMakeLostCount == 0)
11309  {
11310  return true;
11311  }
11312 
11313  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11314 
11315  // We always start from 1st.
11316  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11317  size_t index = m_1stNullItemsBeginCount;
11318  size_t madeLostCount = 0;
11319  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11320  {
11321  if(index == suballocations->size())
11322  {
11323  index = 0;
11324  // If we get to the end of 1st, we wrap around to the beginning of 2nd.
11325  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11326  {
11327  suballocations = &AccessSuballocations2nd();
11328  }
11329  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11330  // suballocations keeps pointing at AccessSuballocations1st().
11331  VMA_ASSERT(!suballocations->empty());
11332  }
11333  VmaSuballocation& suballoc = (*suballocations)[index];
11334  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11335  {
11336  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11337  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11338  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11339  {
11340  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11341  suballoc.hAllocation = VK_NULL_HANDLE;
11342  m_SumFreeSize += suballoc.size;
11343  if(suballocations == &AccessSuballocations1st())
11344  {
11345  ++m_1stNullItemsMiddleCount;
11346  }
11347  else
11348  {
11349  ++m_2ndNullItemsCount;
11350  }
11351  ++madeLostCount;
11352  }
11353  else
11354  {
11355  return false;
11356  }
11357  }
11358  ++index;
11359  }
11360 
11361  CleanupAfterFree();
11362  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11363 
11364  return true;
11365 }
11366 
11367 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11368 {
11369  uint32_t lostAllocationCount = 0;
11370 
11371  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11372  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11373  {
11374  VmaSuballocation& suballoc = suballocations1st[i];
11375  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11376  suballoc.hAllocation->CanBecomeLost() &&
11377  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11378  {
11379  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11380  suballoc.hAllocation = VK_NULL_HANDLE;
11381  ++m_1stNullItemsMiddleCount;
11382  m_SumFreeSize += suballoc.size;
11383  ++lostAllocationCount;
11384  }
11385  }
11386 
11387  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11388  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11389  {
11390  VmaSuballocation& suballoc = suballocations2nd[i];
11391  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11392  suballoc.hAllocation->CanBecomeLost() &&
11393  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11394  {
11395  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11396  suballoc.hAllocation = VK_NULL_HANDLE;
11397  ++m_2ndNullItemsCount;
11398  m_SumFreeSize += suballoc.size;
11399  ++lostAllocationCount;
11400  }
11401  }
11402 
11403  if(lostAllocationCount)
11404  {
11405  CleanupAfterFree();
11406  }
11407 
11408  return lostAllocationCount;
11409 }
11410 
11411 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11412 {
11413  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11414  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11415  {
11416  const VmaSuballocation& suballoc = suballocations1st[i];
11417  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11418  {
11419  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11420  {
11421  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11422  return VK_ERROR_VALIDATION_FAILED_EXT;
11423  }
11424  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11425  {
11426  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11427  return VK_ERROR_VALIDATION_FAILED_EXT;
11428  }
11429  }
11430  }
11431 
11432  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11433  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11434  {
11435  const VmaSuballocation& suballoc = suballocations2nd[i];
11436  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11437  {
11438  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11439  {
11440  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11441  return VK_ERROR_VALIDATION_FAILED_EXT;
11442  }
11443  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11444  {
11445  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11446  return VK_ERROR_VALIDATION_FAILED_EXT;
11447  }
11448  }
11449  }
11450 
11451  return VK_SUCCESS;
11452 }
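/*
CheckCorruption relies on the debug-margin layout: with VMA_DEBUG_MARGIN > 0
and VMA_DEBUG_DETECT_CORRUPTION enabled, a magic value is written on both
sides of every allocation. Layout for one suballocation, relative to the
start of the mapped block (sketch):

    [offset - VMA_DEBUG_MARGIN, offset)    guard before (magic value)
    [offset, offset + size)                user data
    [offset + size, ...)                   guard after (magic value)

VmaValidateMagicValue(pBlockData, off) checks the guard word located at byte
offset off, which is why the code above probes offset - VMA_DEBUG_MARGIN and
offset + size.
*/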
11453 
11454 void VmaBlockMetadata_Linear::Alloc(
11455  const VmaAllocationRequest& request,
11456  VmaSuballocationType type,
11457  VkDeviceSize allocSize,
11458  VmaAllocation hAllocation)
11459 {
11460  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11461 
11462  switch(request.type)
11463  {
11464  case VmaAllocationRequestType::UpperAddress:
11465  {
11466  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11467  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11468  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11469  suballocations2nd.push_back(newSuballoc);
11470  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11471  }
11472  break;
11473  case VmaAllocationRequestType::EndOf1st:
11474  {
11475  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11476 
11477  VMA_ASSERT(suballocations1st.empty() ||
11478  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11479  // Check if it fits before the end of the block.
11480  VMA_ASSERT(request.offset + allocSize <= GetSize());
11481 
11482  suballocations1st.push_back(newSuballoc);
11483  }
11484  break;
11485  case VmaAllocationRequestType::EndOf2nd:
11486  {
11487  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11488  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11489  VMA_ASSERT(!suballocations1st.empty() &&
11490  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11491  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11492 
11493  switch(m_2ndVectorMode)
11494  {
11495  case SECOND_VECTOR_EMPTY:
11496  // First allocation from second part ring buffer.
11497  VMA_ASSERT(suballocations2nd.empty());
11498  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11499  break;
11500  case SECOND_VECTOR_RING_BUFFER:
11501  // 2-part ring buffer is already started.
11502  VMA_ASSERT(!suballocations2nd.empty());
11503  break;
11504  case SECOND_VECTOR_DOUBLE_STACK:
11505  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11506  break;
11507  default:
11508  VMA_ASSERT(0);
11509  }
11510 
11511  suballocations2nd.push_back(newSuballoc);
11512  }
11513  break;
11514  default:
11515  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11516  }
11517 
11518  m_SumFreeSize -= newSuballoc.size;
11519 }
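/*
State transitions of m_2ndVectorMode driven by Alloc (sketch):

    SECOND_VECTOR_EMPTY --UpperAddress--> SECOND_VECTOR_DOUBLE_STACK
    SECOND_VECTOR_EMPTY --EndOf2nd------> SECOND_VECTOR_RING_BUFFER

The two non-empty modes are mutually exclusive; mixing double-stack and
ring-buffer usage on the same block trips the assertions above. The mode
falls back to SECOND_VECTOR_EMPTY in CleanupAfterFree once the 2nd vector
drains.
*/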
11520 
11521 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11522 {
11523  FreeAtOffset(allocation->GetOffset());
11524 }
11525 
11526 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11527 {
11528  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11529  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11530 
11531  if(!suballocations1st.empty())
11532  {
11533  // First allocation: Mark it as next empty at the beginning.
11534  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11535  if(firstSuballoc.offset == offset)
11536  {
11537  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11538  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11539  m_SumFreeSize += firstSuballoc.size;
11540  ++m_1stNullItemsBeginCount;
11541  CleanupAfterFree();
11542  return;
11543  }
11544  }
11545 
11546  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11547  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11548  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11549  {
11550  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11551  if(lastSuballoc.offset == offset)
11552  {
11553  m_SumFreeSize += lastSuballoc.size;
11554  suballocations2nd.pop_back();
11555  CleanupAfterFree();
11556  return;
11557  }
11558  }
11559  // Last allocation in 1st vector.
11560  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11561  {
11562  VmaSuballocation& lastSuballoc = suballocations1st.back();
11563  if(lastSuballoc.offset == offset)
11564  {
11565  m_SumFreeSize += lastSuballoc.size;
11566  suballocations1st.pop_back();
11567  CleanupAfterFree();
11568  return;
11569  }
11570  }
11571 
11572  // Item from the middle of 1st vector.
11573  {
11574  VmaSuballocation refSuballoc;
11575  refSuballoc.offset = offset;
11576  // Rest of members stays uninitialized intentionally for better performance.
11577  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11578  suballocations1st.begin() + m_1stNullItemsBeginCount,
11579  suballocations1st.end(),
11580  refSuballoc,
11581  VmaSuballocationOffsetLess());
11582  if(it != suballocations1st.end())
11583  {
11584  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11585  it->hAllocation = VK_NULL_HANDLE;
11586  ++m_1stNullItemsMiddleCount;
11587  m_SumFreeSize += it->size;
11588  CleanupAfterFree();
11589  return;
11590  }
11591  }
11592 
11593  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11594  {
11595  // Item from the middle of 2nd vector.
11596  VmaSuballocation refSuballoc;
11597  refSuballoc.offset = offset;
11598  // Rest of members stays uninitialized intentionally for better performance.
11599  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11600  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11601  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11602  if(it != suballocations2nd.end())
11603  {
11604  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11605  it->hAllocation = VK_NULL_HANDLE;
11606  ++m_2ndNullItemsCount;
11607  m_SumFreeSize += it->size;
11608  CleanupAfterFree();
11609  return;
11610  }
11611  }
11612 
11613  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11614 }
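/*
Freeing from the middle depends on both vectors being sorted by offset: the
1st vector and a ring-buffer 2nd vector ascending, a double-stack 2nd vector
descending, hence the two comparators. Minimal sketch of the lookup,
assuming VmaBinaryFindSorted returns an iterator to the matching element or
end() when the offset is absent:

    VmaSuballocation refSuballoc;
    refSuballoc.offset = offsetToFree; // only the key field matters
    SuballocationVectorType::iterator it = VmaBinaryFindSorted(
        vec.begin(), vec.end(), refSuballoc, VmaSuballocationOffsetLess());
    const bool found = (it != vec.end());

This keeps FreeAtOffset at O(log n) per vector instead of a linear scan.
*/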
11615 
11616 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11617 {
11618  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11619  const size_t suballocCount = AccessSuballocations1st().size();
11620  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11621 }
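/*
Reading the condition above: compaction of the 1st vector runs only when it
holds more than 32 entries and null items reach at least 1.5x the live
items, since nullItemCount * 2 >= nonNullCount * 3 is equivalent to
nullItemCount >= 1.5 * nonNullCount. Hypothetical example: 100 entries with
60 nulls gives 60 * 2 = 120 >= (100 - 60) * 3 = 120, so compaction runs.
*/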
11622 
11623 void VmaBlockMetadata_Linear::CleanupAfterFree()
11624 {
11625  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11626  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11627 
11628  if(IsEmpty())
11629  {
11630  suballocations1st.clear();
11631  suballocations2nd.clear();
11632  m_1stNullItemsBeginCount = 0;
11633  m_1stNullItemsMiddleCount = 0;
11634  m_2ndNullItemsCount = 0;
11635  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11636  }
11637  else
11638  {
11639  const size_t suballoc1stCount = suballocations1st.size();
11640  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11641  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11642 
11643  // Find more null items at the beginning of 1st vector.
11644  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11645  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11646  {
11647  ++m_1stNullItemsBeginCount;
11648  --m_1stNullItemsMiddleCount;
11649  }
11650 
11651  // Find more null items at the end of 1st vector.
11652  while(m_1stNullItemsMiddleCount > 0 &&
11653  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11654  {
11655  --m_1stNullItemsMiddleCount;
11656  suballocations1st.pop_back();
11657  }
11658 
11659  // Find more null items at the end of 2nd vector.
11660  while(m_2ndNullItemsCount > 0 &&
11661  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11662  {
11663  --m_2ndNullItemsCount;
11664  suballocations2nd.pop_back();
11665  }
11666 
11667  // Find more null items at the beginning of 2nd vector.
11668  while(m_2ndNullItemsCount > 0 &&
11669  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11670  {
11671  --m_2ndNullItemsCount;
11672  VmaVectorRemove(suballocations2nd, 0);
11673  }
11674 
11675  if(ShouldCompact1st())
11676  {
11677  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11678  size_t srcIndex = m_1stNullItemsBeginCount;
11679  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11680  {
11681  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11682  {
11683  ++srcIndex;
11684  }
11685  if(dstIndex != srcIndex)
11686  {
11687  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11688  }
11689  ++srcIndex;
11690  }
11691  suballocations1st.resize(nonNullItemCount);
11692  m_1stNullItemsBeginCount = 0;
11693  m_1stNullItemsMiddleCount = 0;
11694  }
11695 
11696  // 2nd vector became empty.
11697  if(suballocations2nd.empty())
11698  {
11699  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11700  }
11701 
11702  // 1st vector became empty.
11703  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11704  {
11705  suballocations1st.clear();
11706  m_1stNullItemsBeginCount = 0;
11707 
11708  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11709  {
11710  // Swap 1st with 2nd. Now 2nd is empty.
11711  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11712  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11713  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11714  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11715  {
11716  ++m_1stNullItemsBeginCount;
11717  --m_1stNullItemsMiddleCount;
11718  }
11719  m_2ndNullItemsCount = 0;
11720  m_1stVectorIndex ^= 1;
11721  }
11722  }
11723  }
11724 
11725  VMA_HEAVY_ASSERT(Validate());
11726 }
11727 
11728 
11729 ////////////////////////////////////////////////////////////////////////////////
11730 // class VmaBlockMetadata_Buddy
11731 
11732 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11733  VmaBlockMetadata(hAllocator),
11734  m_Root(VMA_NULL),
11735  m_AllocationCount(0),
11736  m_FreeCount(1),
11737  m_SumFreeSize(0)
11738 {
11739  memset(m_FreeList, 0, sizeof(m_FreeList));
11740 }
11741 
11742 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11743 {
11744  DeleteNode(m_Root);
11745 }
11746 
11747 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11748 {
11749  VmaBlockMetadata::Init(size);
11750 
11751  m_UsableSize = VmaPrevPow2(size);
11752  m_SumFreeSize = m_UsableSize;
11753 
11754  // Calculate m_LevelCount.
11755  m_LevelCount = 1;
11756  while(m_LevelCount < MAX_LEVELS &&
11757  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11758  {
11759  ++m_LevelCount;
11760  }
11761 
11762  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11763  rootNode->offset = 0;
11764  rootNode->type = Node::TYPE_FREE;
11765  rootNode->parent = VMA_NULL;
11766  rootNode->buddy = VMA_NULL;
11767 
11768  m_Root = rootNode;
11769  AddToFreeListFront(0, rootNode);
11770 }
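/*
Worked example of Init (hypothetical size, and assuming MIN_NODE_SIZE is
32 bytes as defined earlier in this file): for size = 100 MiB,
m_UsableSize = VmaPrevPow2(size) = 64 MiB = 2^26, so 36 MiB is reported as
unusable. LevelToNodeSize(level) halves per level (2^26 >> level), and the
loop above stops at the last level whose node size is still >= 32 bytes,
giving m_LevelCount = 22.
*/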
11771 
11772 bool VmaBlockMetadata_Buddy::Validate() const
11773 {
11774  // Validate tree.
11775  ValidationContext ctx;
11776  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11777  {
11778  VMA_VALIDATE(false && "ValidateNode failed.");
11779  }
11780  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11781  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11782 
11783  // Validate free node lists.
11784  for(uint32_t level = 0; level < m_LevelCount; ++level)
11785  {
11786  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11787  m_FreeList[level].front->free.prev == VMA_NULL);
11788 
11789  for(Node* node = m_FreeList[level].front;
11790  node != VMA_NULL;
11791  node = node->free.next)
11792  {
11793  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11794 
11795  if(node->free.next == VMA_NULL)
11796  {
11797  VMA_VALIDATE(m_FreeList[level].back == node);
11798  }
11799  else
11800  {
11801  VMA_VALIDATE(node->free.next->free.prev == node);
11802  }
11803  }
11804  }
11805 
11806  // Validate that free lists at higher levels are empty.
11807  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11808  {
11809  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11810  }
11811 
11812  return true;
11813 }
11814 
11815 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11816 {
11817  for(uint32_t level = 0; level < m_LevelCount; ++level)
11818  {
11819  if(m_FreeList[level].front != VMA_NULL)
11820  {
11821  return LevelToNodeSize(level);
11822  }
11823  }
11824  return 0;
11825 }
11826 
11827 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11828 {
11829  const VkDeviceSize unusableSize = GetUnusableSize();
11830 
11831  outInfo.blockCount = 1;
11832 
11833  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11834  outInfo.usedBytes = outInfo.unusedBytes = 0;
11835 
11836  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11837  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11838  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11839 
11840  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11841 
11842  if(unusableSize > 0)
11843  {
11844  ++outInfo.unusedRangeCount;
11845  outInfo.unusedBytes += unusableSize;
11846  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11847  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11848  }
11849 }
11850 
11851 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11852 {
11853  const VkDeviceSize unusableSize = GetUnusableSize();
11854 
11855  inoutStats.size += GetSize();
11856  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11857  inoutStats.allocationCount += m_AllocationCount;
11858  inoutStats.unusedRangeCount += m_FreeCount;
11859  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11860 
11861  if(unusableSize > 0)
11862  {
11863  ++inoutStats.unusedRangeCount;
11864  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11865  }
11866 }
11867 
11868 #if VMA_STATS_STRING_ENABLED
11869 
11870 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11871 {
11872  // TODO optimize
11873  VmaStatInfo stat;
11874  CalcAllocationStatInfo(stat);
11875 
11876  PrintDetailedMap_Begin(
11877  json,
11878  stat.unusedBytes,
11879  stat.allocationCount,
11880  stat.unusedRangeCount);
11881 
11882  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11883 
11884  const VkDeviceSize unusableSize = GetUnusableSize();
11885  if(unusableSize > 0)
11886  {
11887  PrintDetailedMap_UnusedRange(json,
11888  m_UsableSize, // offset
11889  unusableSize); // size
11890  }
11891 
11892  PrintDetailedMap_End(json);
11893 }
11894 
11895 #endif // #if VMA_STATS_STRING_ENABLED
11896 
11897 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11898  uint32_t currentFrameIndex,
11899  uint32_t frameInUseCount,
11900  VkDeviceSize bufferImageGranularity,
11901  VkDeviceSize allocSize,
11902  VkDeviceSize allocAlignment,
11903  bool upperAddress,
11904  VmaSuballocationType allocType,
11905  bool canMakeOtherLost,
11906  uint32_t strategy,
11907  VmaAllocationRequest* pAllocationRequest)
11908 {
11909  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11910 
11911  // Simple way to respect bufferImageGranularity. May be optimized some day.
11912  // Whenever it might be an OPTIMAL image...
11913  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11914  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11915  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11916  {
11917  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11918  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11919  }
11920 
11921  if(allocSize > m_UsableSize)
11922  {
11923  return false;
11924  }
11925 
11926  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11927  for(uint32_t level = targetLevel + 1; level--; )
11928  {
11929  for(Node* freeNode = m_FreeList[level].front;
11930  freeNode != VMA_NULL;
11931  freeNode = freeNode->free.next)
11932  {
11933  if(freeNode->offset % allocAlignment == 0)
11934  {
11935  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11936  pAllocationRequest->offset = freeNode->offset;
11937  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11938  pAllocationRequest->sumItemSize = 0;
11939  pAllocationRequest->itemsToMakeLostCount = 0;
11940  pAllocationRequest->customData = (void*)(uintptr_t)level;
11941  return true;
11942  }
11943  }
11944  }
11945 
11946  return false;
11947 }
11948 
11949 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11950  uint32_t currentFrameIndex,
11951  uint32_t frameInUseCount,
11952  VmaAllocationRequest* pAllocationRequest)
11953 {
11954  /*
11955  Lost allocations are not supported in buddy allocator at the moment.
11956  Support might be added in the future.
11957  */
11958  return pAllocationRequest->itemsToMakeLostCount == 0;
11959 }
11960 
11961 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11962 {
11963  /*
11964  Lost allocations are not supported in buddy allocator at the moment.
11965  Support might be added in the future.
11966  */
11967  return 0;
11968 }
11969 
11970 void VmaBlockMetadata_Buddy::Alloc(
11971  const VmaAllocationRequest& request,
11972  VmaSuballocationType type,
11973  VkDeviceSize allocSize,
11974  VmaAllocation hAllocation)
11975 {
11976  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11977 
11978  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11979  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11980 
11981  Node* currNode = m_FreeList[currLevel].front;
11982  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11983  while(currNode->offset != request.offset)
11984  {
11985  currNode = currNode->free.next;
11986  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11987  }
11988 
11989  // Go down, splitting free nodes.
11990  while(currLevel < targetLevel)
11991  {
11992  // currNode is already first free node at currLevel.
11993  // Remove it from list of free nodes at this currLevel.
11994  RemoveFromFreeList(currLevel, currNode);
11995 
11996  const uint32_t childrenLevel = currLevel + 1;
11997 
11998  // Create two free sub-nodes.
11999  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
12000  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
12001 
12002  leftChild->offset = currNode->offset;
12003  leftChild->type = Node::TYPE_FREE;
12004  leftChild->parent = currNode;
12005  leftChild->buddy = rightChild;
12006 
12007  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
12008  rightChild->type = Node::TYPE_FREE;
12009  rightChild->parent = currNode;
12010  rightChild->buddy = leftChild;
12011 
12012  // Convert current currNode to split type.
12013  currNode->type = Node::TYPE_SPLIT;
12014  currNode->split.leftChild = leftChild;
12015 
12016  // Add child nodes to free list. Order is important!
12017  AddToFreeListFront(childrenLevel, rightChild);
12018  AddToFreeListFront(childrenLevel, leftChild);
12019 
12020  ++m_FreeCount;
12021  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
12022  ++currLevel;
12023  currNode = m_FreeList[currLevel].front;
12024 
12025  /*
12026  We can be sure that currNode, as left child of node previously split,
12027  also fulfills the alignment requirement.
12028  */
12029  }
12030 
12031  // Remove from free list.
12032  VMA_ASSERT(currLevel == targetLevel &&
12033  currNode != VMA_NULL &&
12034  currNode->type == Node::TYPE_FREE);
12035  RemoveFromFreeList(currLevel, currNode);
12036 
12037  // Convert to allocation node.
12038  currNode->type = Node::TYPE_ALLOCATION;
12039  currNode->allocation.alloc = hAllocation;
12040 
12041  ++m_AllocationCount;
12042  --m_FreeCount;
12043  m_SumFreeSize -= allocSize;
12044 }
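
/*
Worked example (illustrative note, not part of the original source): assume a
block with usable size 256 and level node sizes 256/128/64/32 (levels 0..3).
A request of size 24 maps to targetLevel = 3. If the free node found is the
whole block at level 0, the loop above splits three times:

    level 0: 256 free                -> split
    level 1: 128 | 128 free          -> split the left child
    level 2: 64 | 64 free | ...      -> split the left child again
    level 3: 32 | 32 free | ...      -> allocate the first 32-byte node

Each split removes one free node from its level and pushes two children, so
m_FreeCount grows by one per iteration; converting the final node to an
allocation decrements it again, leaving three new free buddies behind.
*/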
12045 
12046 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
12047 {
12048  if(node->type == Node::TYPE_SPLIT)
12049  {
12050  DeleteNode(node->split.leftChild->buddy);
12051  DeleteNode(node->split.leftChild);
12052  }
12053 
12054  vma_delete(GetAllocationCallbacks(), node);
12055 }
12056 
12057 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
12058 {
12059  VMA_VALIDATE(level < m_LevelCount);
12060  VMA_VALIDATE(curr->parent == parent);
12061  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
12062  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
12063  switch(curr->type)
12064  {
12065  case Node::TYPE_FREE:
12066  // curr->free.prev, next are validated separately.
12067  ctx.calculatedSumFreeSize += levelNodeSize;
12068  ++ctx.calculatedFreeCount;
12069  break;
12070  case Node::TYPE_ALLOCATION:
12071  ++ctx.calculatedAllocationCount;
12072  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE); // Validate before dereferencing.
12073  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
12074  break;
12075  case Node::TYPE_SPLIT:
12076  {
12077  const uint32_t childrenLevel = level + 1;
12078  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
12079  const Node* const leftChild = curr->split.leftChild;
12080  VMA_VALIDATE(leftChild != VMA_NULL);
12081  VMA_VALIDATE(leftChild->offset == curr->offset);
12082  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
12083  {
12084  VMA_VALIDATE(false && "ValidateNode for left child failed.");
12085  }
12086  const Node* const rightChild = leftChild->buddy;
12087  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
12088  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
12089  {
12090  VMA_VALIDATE(false && "ValidateNode for right child failed.");
12091  }
12092  }
12093  break;
12094  default:
12095  return false;
12096  }
12097 
12098  return true;
12099 }
12100 
12101 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
12102 {
12103  // I know this could be optimized e.g. by using std::bit_width (formerly std::log2p1) from C++20.
12104  uint32_t level = 0;
12105  VkDeviceSize currLevelNodeSize = m_UsableSize;
12106  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
12107  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
12108  {
12109  ++level;
12110  currLevelNodeSize = nextLevelNodeSize;
12111  nextLevelNodeSize = currLevelNodeSize >> 1;
12112  }
12113  return level;
12114 }
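
/*
A possible C++20 closed form for the loop above (a sketch only, assuming
m_UsableSize is a power of two and allocSize does not exceed it):

    #include <bit>
    const uint32_t usableLog2 = (uint32_t)std::bit_width(m_UsableSize) - 1;  // floor(log2)
    const uint32_t allocLog2 = allocSize > 1 ?
        (uint32_t)std::bit_width(allocSize - 1) : 0;                         // ceil(log2)
    return VMA_MIN(m_LevelCount - 1, usableLog2 - allocLog2);

E.g. for m_UsableSize = 256 and allocSize = 24: usableLog2 = 8, allocLog2 = 5,
giving level 3 (node size 32), same as the loop above.
*/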
12115 
12116 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
12117 {
12118  // Find node and level.
12119  Node* node = m_Root;
12120  VkDeviceSize nodeOffset = 0;
12121  uint32_t level = 0;
12122  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
12123  while(node->type == Node::TYPE_SPLIT)
12124  {
12125  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
12126  if(offset < nodeOffset + nextLevelSize)
12127  {
12128  node = node->split.leftChild;
12129  }
12130  else
12131  {
12132  node = node->split.leftChild->buddy;
12133  nodeOffset += nextLevelSize;
12134  }
12135  ++level;
12136  levelNodeSize = nextLevelSize;
12137  }
12138 
12139  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
12140  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
12141 
12142  ++m_FreeCount;
12143  --m_AllocationCount;
12144  m_SumFreeSize += alloc->GetSize();
12145 
12146  node->type = Node::TYPE_FREE;
12147 
12148  // Join free nodes if possible.
12149  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
12150  {
12151  RemoveFromFreeList(level, node->buddy);
12152  Node* const parent = node->parent;
12153 
12154  vma_delete(GetAllocationCallbacks(), node->buddy);
12155  vma_delete(GetAllocationCallbacks(), node);
12156  parent->type = Node::TYPE_FREE;
12157 
12158  node = parent;
12159  --level;
12160  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non-power-of-2.
12161  --m_FreeCount;
12162  }
12163 
12164  AddToFreeListFront(level, node);
12165 }
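
/*
The descent above locates the allocated node in O(depth) by comparing the
target offset against the midpoint of each split node, so no per-allocation
lookup table is needed. The merge loop then undoes Alloc's splitting:
whenever a node and its buddy are both free, they collapse into their parent,
preserving the buddy-allocator invariant that two free buddies never coexist
on the free lists.
*/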
12166 
12167 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
12168 {
12169  switch(node->type)
12170  {
12171  case Node::TYPE_FREE:
12172  ++outInfo.unusedRangeCount;
12173  outInfo.unusedBytes += levelNodeSize;
12174  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
12175  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
12176  break;
12177  case Node::TYPE_ALLOCATION:
12178  {
12179  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12180  ++outInfo.allocationCount;
12181  outInfo.usedBytes += allocSize;
12182  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
12183  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
12184 
12185  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
12186  if(unusedRangeSize > 0)
12187  {
12188  ++outInfo.unusedRangeCount;
12189  outInfo.unusedBytes += unusedRangeSize;
12190  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
12191  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
12192  }
12193  }
12194  break;
12195  case Node::TYPE_SPLIT:
12196  {
12197  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12198  const Node* const leftChild = node->split.leftChild;
12199  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12200  const Node* const rightChild = leftChild->buddy;
12201  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12202  }
12203  break;
12204  default:
12205  VMA_ASSERT(0);
12206  }
12207 }
12208 
12209 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12210 {
12211  VMA_ASSERT(node->type == Node::TYPE_FREE);
12212 
12213  // List is empty.
12214  Node* const frontNode = m_FreeList[level].front;
12215  if(frontNode == VMA_NULL)
12216  {
12217  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12218  node->free.prev = node->free.next = VMA_NULL;
12219  m_FreeList[level].front = m_FreeList[level].back = node;
12220  }
12221  else
12222  {
12223  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12224  node->free.prev = VMA_NULL;
12225  node->free.next = frontNode;
12226  frontNode->free.prev = node;
12227  m_FreeList[level].front = node;
12228  }
12229 }
12230 
12231 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12232 {
12233  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12234 
12235  // It is at the front.
12236  if(node->free.prev == VMA_NULL)
12237  {
12238  VMA_ASSERT(m_FreeList[level].front == node);
12239  m_FreeList[level].front = node->free.next;
12240  }
12241  else
12242  {
12243  Node* const prevFreeNode = node->free.prev;
12244  VMA_ASSERT(prevFreeNode->free.next == node);
12245  prevFreeNode->free.next = node->free.next;
12246  }
12247 
12248  // It is at the back.
12249  if(node->free.next == VMA_NULL)
12250  {
12251  VMA_ASSERT(m_FreeList[level].back == node);
12252  m_FreeList[level].back = node->free.prev;
12253  }
12254  else
12255  {
12256  Node* const nextFreeNode = node->free.next;
12257  VMA_ASSERT(nextFreeNode->free.prev == node);
12258  nextFreeNode->free.prev = node->free.prev;
12259  }
12260 }
12261 
12262 #if VMA_STATS_STRING_ENABLED
12263 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12264 {
12265  switch(node->type)
12266  {
12267  case Node::TYPE_FREE:
12268  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12269  break;
12270  case Node::TYPE_ALLOCATION:
12271  {
12272  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12273  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12274  if(allocSize < levelNodeSize)
12275  {
12276  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12277  }
12278  }
12279  break;
12280  case Node::TYPE_SPLIT:
12281  {
12282  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12283  const Node* const leftChild = node->split.leftChild;
12284  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12285  const Node* const rightChild = leftChild->buddy;
12286  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12287  }
12288  break;
12289  default:
12290  VMA_ASSERT(0);
12291  }
12292 }
12293 #endif // #if VMA_STATS_STRING_ENABLED
12294 
12295 
12296 ////////////////////////////////////////////////////////////////////////////////
12297 // class VmaDeviceMemoryBlock
12298 
12299 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12300  m_pMetadata(VMA_NULL),
12301  m_MemoryTypeIndex(UINT32_MAX),
12302  m_Id(0),
12303  m_hMemory(VK_NULL_HANDLE),
12304  m_MapCount(0),
12305  m_pMappedData(VMA_NULL)
12306 {
12307 }
12308 
12309 void VmaDeviceMemoryBlock::Init(
12310  VmaAllocator hAllocator,
12311  VmaPool hParentPool,
12312  uint32_t newMemoryTypeIndex,
12313  VkDeviceMemory newMemory,
12314  VkDeviceSize newSize,
12315  uint32_t id,
12316  uint32_t algorithm)
12317 {
12318  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12319 
12320  m_hParentPool = hParentPool;
12321  m_MemoryTypeIndex = newMemoryTypeIndex;
12322  m_Id = id;
12323  m_hMemory = newMemory;
12324 
12325  switch(algorithm)
12326  {
12327  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
12328  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
12329  break;
12330  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
12331  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
12332  break;
12333  default:
12334  VMA_ASSERT(0);
12335  // Fall-through.
12336  case 0:
12337  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12338  }
12339  m_pMetadata->Init(newSize);
12340 }
12341 
12342 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12343 {
12344  // This is the most important assert in the entire library.
12345  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12346  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12347 
12348  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12349  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12350  m_hMemory = VK_NULL_HANDLE;
12351 
12352  vma_delete(allocator, m_pMetadata);
12353  m_pMetadata = VMA_NULL;
12354 }
12355 
12356 bool VmaDeviceMemoryBlock::Validate() const
12357 {
12358  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12359  (m_pMetadata->GetSize() != 0));
12360 
12361  return m_pMetadata->Validate();
12362 }
12363 
12364 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12365 {
12366  void* pData = nullptr;
12367  VkResult res = Map(hAllocator, 1, &pData);
12368  if(res != VK_SUCCESS)
12369  {
12370  return res;
12371  }
12372 
12373  res = m_pMetadata->CheckCorruption(pData);
12374 
12375  Unmap(hAllocator, 1);
12376 
12377  return res;
12378 }
12379 
12380 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12381 {
12382  if(count == 0)
12383  {
12384  return VK_SUCCESS;
12385  }
12386 
12387  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12388  if(m_MapCount != 0)
12389  {
12390  m_MapCount += count;
12391  VMA_ASSERT(m_pMappedData != VMA_NULL);
12392  if(ppData != VMA_NULL)
12393  {
12394  *ppData = m_pMappedData;
12395  }
12396  return VK_SUCCESS;
12397  }
12398  else
12399  {
12400  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12401  hAllocator->m_hDevice,
12402  m_hMemory,
12403  0, // offset
12404  VK_WHOLE_SIZE,
12405  0, // flags
12406  &m_pMappedData);
12407  if(result == VK_SUCCESS)
12408  {
12409  if(ppData != VMA_NULL)
12410  {
12411  *ppData = m_pMappedData;
12412  }
12413  m_MapCount = count;
12414  }
12415  return result;
12416  }
12417 }
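
/*
Mapping is reference-counted per block: only the first Map() calls vkMapMemory
and only the matching final Unmap() calls vkUnmapMemory, so persistently
mapped allocations and temporary maps can share one VkDeviceMemory. A minimal
usage sketch (hypothetical caller code, not part of the library):

    void* p1 = VMA_NULL;
    void* p2 = VMA_NULL;
    block->Map(hAllocator, 1, &p1);  // vkMapMemory called, m_MapCount == 1
    block->Map(hAllocator, 1, &p2);  // cached pointer returned, m_MapCount == 2
    VMA_ASSERT(p1 == p2);
    block->Unmap(hAllocator, 1);     // m_MapCount == 1, memory stays mapped
    block->Unmap(hAllocator, 1);     // m_MapCount == 0, vkUnmapMemory called
*/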
12418 
12419 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12420 {
12421  if(count == 0)
12422  {
12423  return;
12424  }
12425 
12426  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12427  if(m_MapCount >= count)
12428  {
12429  m_MapCount -= count;
12430  if(m_MapCount == 0)
12431  {
12432  m_pMappedData = VMA_NULL;
12433  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12434  }
12435  }
12436  else
12437  {
12438  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12439  }
12440 }
12441 
12442 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12443 {
12444  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12445  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12446 
12447  void* pData;
12448  VkResult res = Map(hAllocator, 1, &pData);
12449  if(res != VK_SUCCESS)
12450  {
12451  return res;
12452  }
12453 
12454  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12455  VmaWriteMagicValue(pData, allocOffset + allocSize);
12456 
12457  Unmap(hAllocator, 1);
12458 
12459  return VK_SUCCESS;
12460 }
12461 
12462 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12463 {
12464  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12465  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12466 
12467  void* pData;
12468  VkResult res = Map(hAllocator, 1, &pData);
12469  if(res != VK_SUCCESS)
12470  {
12471  return res;
12472  }
12473 
12474  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12475  {
12476  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12477  }
12478  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12479  {
12480  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12481  }
12482 
12483  Unmap(hAllocator, 1);
12484 
12485  return VK_SUCCESS;
12486 }
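
/*
Memory layout assumed by WriteMagicValueAroundAllocation and
ValidateMagicValueAroundAllocation above (VMA_DEBUG_MARGIN > 0):

    ...| magic (VMA_DEBUG_MARGIN bytes) | allocation (allocSize bytes) | magic |...
       ^ allocOffset - VMA_DEBUG_MARGIN   ^ allocOffset                  ^ allocOffset + allocSize

A mismatched magic value before or after the allocation means the application
wrote out of bounds; the asserts report on which side the damage occurred.
*/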
12487 
12488 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12489  const VmaAllocator hAllocator,
12490  const VmaAllocation hAllocation,
12491  VkDeviceSize allocationLocalOffset,
12492  VkBuffer hBuffer,
12493  const void* pNext)
12494 {
12495  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12496  hAllocation->GetBlock() == this);
12497  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12498  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12499  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12500  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12501  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12502  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12503 }
12504 
12505 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12506  const VmaAllocator hAllocator,
12507  const VmaAllocation hAllocation,
12508  VkDeviceSize allocationLocalOffset,
12509  VkImage hImage,
12510  const void* pNext)
12511 {
12512  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12513  hAllocation->GetBlock() == this);
12514  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12515  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12516  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12517  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12518  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12519  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12520 }
12521 
12522 static void InitStatInfo(VmaStatInfo& outInfo)
12523 {
12524  memset(&outInfo, 0, sizeof(outInfo));
12525  outInfo.allocationSizeMin = UINT64_MAX;
12526  outInfo.unusedRangeSizeMin = UINT64_MAX;
12527 }
12528 
12529 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12530 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12531 {
12532  inoutInfo.blockCount += srcInfo.blockCount;
12533  inoutInfo.allocationCount += srcInfo.allocationCount;
12534  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12535  inoutInfo.usedBytes += srcInfo.usedBytes;
12536  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12537  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12538  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12539  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12540  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12541 }
12542 
12543 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12544 {
12545  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12546  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12547  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12548  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12549 }
12550 
12551 VmaPool_T::VmaPool_T(
12552  VmaAllocator hAllocator,
12553  const VmaPoolCreateInfo& createInfo,
12554  VkDeviceSize preferredBlockSize) :
12555  m_BlockVector(
12556  hAllocator,
12557  this, // hParentPool
12558  createInfo.memoryTypeIndex,
12559  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12560  createInfo.minBlockCount,
12561  createInfo.maxBlockCount,
12562  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12563  createInfo.frameInUseCount,
12564  createInfo.blockSize != 0, // explicitBlockSize
12565  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
12566  m_Id(0),
12567  m_Name(VMA_NULL)
12568 {
12569 }
12570 
12571 VmaPool_T::~VmaPool_T()
12572 {
12573 }
12574 
12575 void VmaPool_T::SetName(const char* pName)
12576 {
12577  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12578  VmaFreeString(allocs, m_Name);
12579 
12580  if(pName != VMA_NULL)
12581  {
12582  m_Name = VmaCreateStringCopy(allocs, pName);
12583  }
12584  else
12585  {
12586  m_Name = VMA_NULL;
12587  }
12588 }
12589 
12590 #if VMA_STATS_STRING_ENABLED
12591 
12592 #endif // #if VMA_STATS_STRING_ENABLED
12593 
12594 VmaBlockVector::VmaBlockVector(
12595  VmaAllocator hAllocator,
12596  VmaPool hParentPool,
12597  uint32_t memoryTypeIndex,
12598  VkDeviceSize preferredBlockSize,
12599  size_t minBlockCount,
12600  size_t maxBlockCount,
12601  VkDeviceSize bufferImageGranularity,
12602  uint32_t frameInUseCount,
12603  bool explicitBlockSize,
12604  uint32_t algorithm) :
12605  m_hAllocator(hAllocator),
12606  m_hParentPool(hParentPool),
12607  m_MemoryTypeIndex(memoryTypeIndex),
12608  m_PreferredBlockSize(preferredBlockSize),
12609  m_MinBlockCount(minBlockCount),
12610  m_MaxBlockCount(maxBlockCount),
12611  m_BufferImageGranularity(bufferImageGranularity),
12612  m_FrameInUseCount(frameInUseCount),
12613  m_ExplicitBlockSize(explicitBlockSize),
12614  m_Algorithm(algorithm),
12615  m_HasEmptyBlock(false),
12616  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12617  m_NextBlockId(0)
12618 {
12619 }
12620 
12621 VmaBlockVector::~VmaBlockVector()
12622 {
12623  for(size_t i = m_Blocks.size(); i--; )
12624  {
12625  m_Blocks[i]->Destroy(m_hAllocator);
12626  vma_delete(m_hAllocator, m_Blocks[i]);
12627  }
12628 }
12629 
12630 VkResult VmaBlockVector::CreateMinBlocks()
12631 {
12632  for(size_t i = 0; i < m_MinBlockCount; ++i)
12633  {
12634  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12635  if(res != VK_SUCCESS)
12636  {
12637  return res;
12638  }
12639  }
12640  return VK_SUCCESS;
12641 }
12642 
12643 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12644 {
12645  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12646 
12647  const size_t blockCount = m_Blocks.size();
12648 
12649  pStats->size = 0;
12650  pStats->unusedSize = 0;
12651  pStats->allocationCount = 0;
12652  pStats->unusedRangeCount = 0;
12653  pStats->unusedRangeSizeMax = 0;
12654  pStats->blockCount = blockCount;
12655 
12656  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12657  {
12658  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12659  VMA_ASSERT(pBlock);
12660  VMA_HEAVY_ASSERT(pBlock->Validate());
12661  pBlock->m_pMetadata->AddPoolStats(*pStats);
12662  }
12663 }
12664 
12665 bool VmaBlockVector::IsEmpty()
12666 {
12667  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12668  return m_Blocks.empty();
12669 }
12670 
12671 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12672 {
12673  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12674  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12675  (VMA_DEBUG_MARGIN > 0) &&
12676  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12677  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12678 }
12679 
12680 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12681 
12682 VkResult VmaBlockVector::Allocate(
12683  uint32_t currentFrameIndex,
12684  VkDeviceSize size,
12685  VkDeviceSize alignment,
12686  const VmaAllocationCreateInfo& createInfo,
12687  VmaSuballocationType suballocType,
12688  size_t allocationCount,
12689  VmaAllocation* pAllocations)
12690 {
12691  size_t allocIndex;
12692  VkResult res = VK_SUCCESS;
12693 
12694  if(IsCorruptionDetectionEnabled())
12695  {
12696  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12697  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12698  }
12699 
12700  {
12701  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12702  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12703  {
12704  res = AllocatePage(
12705  currentFrameIndex,
12706  size,
12707  alignment,
12708  createInfo,
12709  suballocType,
12710  pAllocations + allocIndex);
12711  if(res != VK_SUCCESS)
12712  {
12713  break;
12714  }
12715  }
12716  }
12717 
12718  if(res != VK_SUCCESS)
12719  {
12720  // Free all already created allocations.
12721  while(allocIndex--)
12722  {
12723  Free(pAllocations[allocIndex]);
12724  }
12725  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12726  }
12727 
12728  return res;
12729 }
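
/*
Note that multi-allocation is all-or-nothing: if any page fails, the pages
already created are freed in reverse order and the entire output array is
zeroed, so the caller never observes a partially filled result.
*/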
12730 
12731 VkResult VmaBlockVector::AllocatePage(
12732  uint32_t currentFrameIndex,
12733  VkDeviceSize size,
12734  VkDeviceSize alignment,
12735  const VmaAllocationCreateInfo& createInfo,
12736  VmaSuballocationType suballocType,
12737  VmaAllocation* pAllocation)
12738 {
12739  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12740  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12741  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12742  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12743 
12744  VkDeviceSize freeMemory;
12745  {
12746  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12747  VmaBudget heapBudget = {};
12748  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12749  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12750  }
12751 
12752  const bool canFallbackToDedicated = !IsCustomPool();
12753  const bool canCreateNewBlock =
12754  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12755  (m_Blocks.size() < m_MaxBlockCount) &&
12756  (freeMemory >= size || !canFallbackToDedicated);
12757  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12758 
12759  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
12760  // which in turn is available only when maxBlockCount = 1.
12761  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12762  {
12763  canMakeOtherLost = false;
12764  }
12765 
12766  // Upper address can only be used with the linear allocator and within a single memory block.
12767  if(isUpperAddress &&
12768  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12769  {
12770  return VK_ERROR_FEATURE_NOT_PRESENT;
12771  }
12772 
12773  // Validate strategy.
12774  switch(strategy)
12775  {
12776  case 0:
12777  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
12778  break;
12779  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
12780  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
12781  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
12782  break;
12783  default:
12784  return VK_ERROR_FEATURE_NOT_PRESENT;
12785  }
12786 
12787  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12788  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12789  {
12790  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12791  }
12792 
12793  /*
12794  Under certain conditions, this whole section can be skipped for optimization, and
12795  we move on directly to trying to allocate with canMakeOtherLost. That is the case
12796  e.g. for custom pools with linear algorithm.
12797  */
12798  if(!canMakeOtherLost || canCreateNewBlock)
12799  {
12800  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12801  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12802  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
12803 
12804  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12805  {
12806  // Use only last block.
12807  if(!m_Blocks.empty())
12808  {
12809  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12810  VMA_ASSERT(pCurrBlock);
12811  VkResult res = AllocateFromBlock(
12812  pCurrBlock,
12813  currentFrameIndex,
12814  size,
12815  alignment,
12816  allocFlagsCopy,
12817  createInfo.pUserData,
12818  suballocType,
12819  strategy,
12820  pAllocation);
12821  if(res == VK_SUCCESS)
12822  {
12823  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12824  return VK_SUCCESS;
12825  }
12826  }
12827  }
12828  else
12829  {
12830  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12831  {
12832  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12833  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12834  {
12835  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12836  VMA_ASSERT(pCurrBlock);
12837  VkResult res = AllocateFromBlock(
12838  pCurrBlock,
12839  currentFrameIndex,
12840  size,
12841  alignment,
12842  allocFlagsCopy,
12843  createInfo.pUserData,
12844  suballocType,
12845  strategy,
12846  pAllocation);
12847  if(res == VK_SUCCESS)
12848  {
12849  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12850  return VK_SUCCESS;
12851  }
12852  }
12853  }
12854  else // WORST_FIT, FIRST_FIT
12855  {
12856  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12857  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12858  {
12859  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12860  VMA_ASSERT(pCurrBlock);
12861  VkResult res = AllocateFromBlock(
12862  pCurrBlock,
12863  currentFrameIndex,
12864  size,
12865  alignment,
12866  allocFlagsCopy,
12867  createInfo.pUserData,
12868  suballocType,
12869  strategy,
12870  pAllocation);
12871  if(res == VK_SUCCESS)
12872  {
12873  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12874  return VK_SUCCESS;
12875  }
12876  }
12877  }
12878  }
12879 
12880  // 2. Try to create new block.
12881  if(canCreateNewBlock)
12882  {
12883  // Calculate optimal size for new block.
12884  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12885  uint32_t newBlockSizeShift = 0;
12886  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12887 
12888  if(!m_ExplicitBlockSize)
12889  {
12890  // Allocate 1/8, 1/4, 1/2 as first blocks.
12891  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12892  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12893  {
12894  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12895  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12896  {
12897  newBlockSize = smallerNewBlockSize;
12898  ++newBlockSizeShift;
12899  }
12900  else
12901  {
12902  break;
12903  }
12904  }
12905  }
12906 
12907  size_t newBlockIndex = 0;
12908  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12909  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12910  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12911  if(!m_ExplicitBlockSize)
12912  {
12913  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12914  {
12915  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12916  if(smallerNewBlockSize >= size)
12917  {
12918  newBlockSize = smallerNewBlockSize;
12919  ++newBlockSizeShift;
12920  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12921  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12922  }
12923  else
12924  {
12925  break;
12926  }
12927  }
12928  }
12929 
12930  if(res == VK_SUCCESS)
12931  {
12932  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12933  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12934 
12935  res = AllocateFromBlock(
12936  pBlock,
12937  currentFrameIndex,
12938  size,
12939  alignment,
12940  allocFlagsCopy,
12941  createInfo.pUserData,
12942  suballocType,
12943  strategy,
12944  pAllocation);
12945  if(res == VK_SUCCESS)
12946  {
12947  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12948  return VK_SUCCESS;
12949  }
12950  else
12951  {
12952  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12953  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12954  }
12955  }
12956  }
12957  }
12958 
12959  // 3. Try to allocate from existing blocks, making other allocations lost.
12960  if(canMakeOtherLost)
12961  {
12962  uint32_t tryIndex = 0;
12963  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12964  {
12965  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12966  VmaAllocationRequest bestRequest = {};
12967  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12968 
12969  // 1. Search existing allocations.
12970  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12971  {
12972  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12973  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12974  {
12975  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12976  VMA_ASSERT(pCurrBlock);
12977  VmaAllocationRequest currRequest = {};
12978  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12979  currentFrameIndex,
12980  m_FrameInUseCount,
12981  m_BufferImageGranularity,
12982  size,
12983  alignment,
12984  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12985  suballocType,
12986  canMakeOtherLost,
12987  strategy,
12988  &currRequest))
12989  {
12990  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12991  if(pBestRequestBlock == VMA_NULL ||
12992  currRequestCost < bestRequestCost)
12993  {
12994  pBestRequestBlock = pCurrBlock;
12995  bestRequest = currRequest;
12996  bestRequestCost = currRequestCost;
12997 
12998  if(bestRequestCost == 0)
12999  {
13000  break;
13001  }
13002  }
13003  }
13004  }
13005  }
13006  else // WORST_FIT, FIRST_FIT
13007  {
13008  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
13009  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13010  {
13011  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
13012  VMA_ASSERT(pCurrBlock);
13013  VmaAllocationRequest currRequest = {};
13014  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
13015  currentFrameIndex,
13016  m_FrameInUseCount,
13017  m_BufferImageGranularity,
13018  size,
13019  alignment,
13020  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
13021  suballocType,
13022  canMakeOtherLost,
13023  strategy,
13024  &currRequest))
13025  {
13026  const VkDeviceSize currRequestCost = currRequest.CalcCost();
13027  if(pBestRequestBlock == VMA_NULL ||
13028  currRequestCost < bestRequestCost ||
13029  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13030  {
13031  pBestRequestBlock = pCurrBlock;
13032  bestRequest = currRequest;
13033  bestRequestCost = currRequestCost;
13034 
13035  if(bestRequestCost == 0 ||
13036  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13037  {
13038  break;
13039  }
13040  }
13041  }
13042  }
13043  }
13044 
13045  if(pBestRequestBlock != VMA_NULL)
13046  {
13047  if(mapped)
13048  {
13049  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
13050  if(res != VK_SUCCESS)
13051  {
13052  return res;
13053  }
13054  }
13055 
13056  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
13057  currentFrameIndex,
13058  m_FrameInUseCount,
13059  &bestRequest))
13060  {
13061  // Allocate from this pBlock.
13062  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13063  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
13064  UpdateHasEmptyBlock();
13065  (*pAllocation)->InitBlockAllocation(
13066  pBestRequestBlock,
13067  bestRequest.offset,
13068  alignment,
13069  size,
13070  m_MemoryTypeIndex,
13071  suballocType,
13072  mapped,
13073  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13074  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
13075  VMA_DEBUG_LOG(" Returned from existing block");
13076  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
13077  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13078  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13079  {
13080  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13081  }
13082  if(IsCorruptionDetectionEnabled())
13083  {
13084  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
13085  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13086  }
13087  return VK_SUCCESS;
13088  }
13089  // else: Some allocations must have been touched while we are here. Next try.
13090  }
13091  else
13092  {
13093  // Could not find place in any of the blocks - break outer loop.
13094  break;
13095  }
13096  }
13097  /* Maximum number of tries exceeded - a very unlikely event that happens when many
13098  other threads are simultaneously touching allocations, making it impossible to
13099  make them lost at the same time as we try to allocate. */
13100  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
13101  {
13102  return VK_ERROR_TOO_MANY_OBJECTS;
13103  }
13104  }
13105 
13106  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13107 }
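
/*
Summary of the strategy implemented above:
1. Try existing blocks - only the last block for the linear algorithm;
   forward order (least free space first) for BEST_FIT, backward order for
   WORST_FIT/FIRST_FIT.
2. Create a new block - for the first blocks of a non-explicit size, prefer
   1/8, 1/4, 1/2 of the preferred size, and halve again (up to
   NEW_BLOCK_SIZE_SHIFT_MAX times) if vkAllocateMemory fails.
3. If canMakeOtherLost, repeatedly pick the block whose request has the lowest
   cost (fewest bytes of other allocations sacrificed), retrying up to
   VMA_ALLOCATION_TRY_COUNT times while other threads touch allocations.
*/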
13108 
13109 void VmaBlockVector::Free(
13110  const VmaAllocation hAllocation)
13111 {
13112  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
13113 
13114  bool budgetExceeded = false;
13115  {
13116  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13117  VmaBudget heapBudget = {};
13118  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13119  budgetExceeded = heapBudget.usage >= heapBudget.budget;
13120  }
13121 
13122  // Scope for lock.
13123  {
13124  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13125 
13126  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13127 
13128  if(IsCorruptionDetectionEnabled())
13129  {
13130  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
13131  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
13132  }
13133 
13134  if(hAllocation->IsPersistentMap())
13135  {
13136  pBlock->Unmap(m_hAllocator, 1);
13137  }
13138 
13139  pBlock->m_pMetadata->Free(hAllocation);
13140  VMA_HEAVY_ASSERT(pBlock->Validate());
13141 
13142  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
13143 
13144  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
13145  // pBlock became empty after this deallocation.
13146  if(pBlock->m_pMetadata->IsEmpty())
13147  {
13148  // We already have an empty block - we don't want two, so delete this one.
13149  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
13150  {
13151  pBlockToDelete = pBlock;
13152  Remove(pBlock);
13153  }
13154  // else: We now have an empty block - leave it.
13155  }
13156  // pBlock didn't become empty, but we have another empty block - find and free that one.
13157  // (This is optional - just a heuristic.)
13158  else if(m_HasEmptyBlock && canDeleteBlock)
13159  {
13160  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
13161  if(pLastBlock->m_pMetadata->IsEmpty())
13162  {
13163  pBlockToDelete = pLastBlock;
13164  m_Blocks.pop_back();
13165  }
13166  }
13167 
13168  UpdateHasEmptyBlock();
13169  IncrementallySortBlocks();
13170  }
13171 
13172  // Destruction of a free block. Deferred until this point, outside of the mutex
13173  // lock, for performance reasons.
13174  if(pBlockToDelete != VMA_NULL)
13175  {
13176  VMA_DEBUG_LOG(" Deleted empty block");
13177  pBlockToDelete->Destroy(m_hAllocator);
13178  vma_delete(m_hAllocator, pBlockToDelete);
13179  }
13180 }
13181 
13182 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
13183 {
13184  VkDeviceSize result = 0;
13185  for(size_t i = m_Blocks.size(); i--; )
13186  {
13187  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
13188  if(result >= m_PreferredBlockSize)
13189  {
13190  break;
13191  }
13192  }
13193  return result;
13194 }
13195 
13196 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13197 {
13198  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13199  {
13200  if(m_Blocks[blockIndex] == pBlock)
13201  {
13202  VmaVectorRemove(m_Blocks, blockIndex);
13203  return;
13204  }
13205  }
13206  VMA_ASSERT(0);
13207 }
13208 
13209 void VmaBlockVector::IncrementallySortBlocks()
13210 {
13211  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13212  {
13213  // Bubble sort only until first swap.
13214  for(size_t i = 1; i < m_Blocks.size(); ++i)
13215  {
13216  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13217  {
13218  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13219  return;
13220  }
13221  }
13222  }
13223 }
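
/*
This is a deliberately lazy sort: one bubble pass that stops at the first
swap. Because it runs after every free, blocks drift toward ascending order
of free space over time, keeping the "prefer fuller blocks" iteration order
in AllocatePage roughly correct without ever paying for a full sort.
*/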
13224 
13225 VkResult VmaBlockVector::AllocateFromBlock(
13226  VmaDeviceMemoryBlock* pBlock,
13227  uint32_t currentFrameIndex,
13228  VkDeviceSize size,
13229  VkDeviceSize alignment,
13230  VmaAllocationCreateFlags allocFlags,
13231  void* pUserData,
13232  VmaSuballocationType suballocType,
13233  uint32_t strategy,
13234  VmaAllocation* pAllocation)
13235 {
13236  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13237  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13238  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13239  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13240 
13241  VmaAllocationRequest currRequest = {};
13242  if(pBlock->m_pMetadata->CreateAllocationRequest(
13243  currentFrameIndex,
13244  m_FrameInUseCount,
13245  m_BufferImageGranularity,
13246  size,
13247  alignment,
13248  isUpperAddress,
13249  suballocType,
13250  false, // canMakeOtherLost
13251  strategy,
13252  &currRequest))
13253  {
13254  // Allocate from pBlock.
13255  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13256 
13257  if(mapped)
13258  {
13259  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13260  if(res != VK_SUCCESS)
13261  {
13262  return res;
13263  }
13264  }
13265 
13266  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13267  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13268  UpdateHasEmptyBlock();
13269  (*pAllocation)->InitBlockAllocation(
13270  pBlock,
13271  currRequest.offset,
13272  alignment,
13273  size,
13274  m_MemoryTypeIndex,
13275  suballocType,
13276  mapped,
13277  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13278  VMA_HEAVY_ASSERT(pBlock->Validate());
13279  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13280  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13281  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13282  {
13283  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13284  }
13285  if(IsCorruptionDetectionEnabled())
13286  {
13287  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13288  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13289  }
13290  return VK_SUCCESS;
13291  }
13292  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13293 }
13294 
13295 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13296 {
13297  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13298  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13299  allocInfo.allocationSize = blockSize;
13300 
13301 #if VMA_BUFFER_DEVICE_ADDRESS
13302  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13303  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13304  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13305  {
13306  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13307  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13308  }
13309 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13310 
13311  VkDeviceMemory mem = VK_NULL_HANDLE;
13312  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13313  if(res < 0)
13314  {
13315  return res;
13316  }
13317 
13318  // New VkDeviceMemory successfully created.
13319 
13320  // Create a new VmaDeviceMemoryBlock object for it.
13321  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13322  pBlock->Init(
13323  m_hAllocator,
13324  m_hParentPool,
13325  m_MemoryTypeIndex,
13326  mem,
13327  allocInfo.allocationSize,
13328  m_NextBlockId++,
13329  m_Algorithm);
13330 
13331  m_Blocks.push_back(pBlock);
13332  if(pNewBlockIndex != VMA_NULL)
13333  {
13334  *pNewBlockIndex = m_Blocks.size() - 1;
13335  }
13336 
13337  return VK_SUCCESS;
13338 }
13339 
13340 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13341  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13342  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13343 {
13344  const size_t blockCount = m_Blocks.size();
13345  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13346 
13347  enum BLOCK_FLAG
13348  {
13349  BLOCK_FLAG_USED = 0x00000001,
13350  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13351  };
13352 
13353  struct BlockInfo
13354  {
13355  uint32_t flags;
13356  void* pMappedData;
13357  };
13358  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13359  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13360  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13361 
13362  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13363  const size_t moveCount = moves.size();
13364  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13365  {
13366  const VmaDefragmentationMove& move = moves[moveIndex];
13367  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13368  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13369  }
13370 
13371  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13372 
13373  // Go over all blocks. Get mapped pointer or map if necessary.
13374  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13375  {
13376  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13377  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13378  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13379  {
13380  currBlockInfo.pMappedData = pBlock->GetMappedData();
13381  // It is not originally mapped - map it.
13382  if(currBlockInfo.pMappedData == VMA_NULL)
13383  {
13384  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13385  if(pDefragCtx->res == VK_SUCCESS)
13386  {
13387  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13388  }
13389  }
13390  }
13391  }
13392 
13393  // Go over all moves. Do actual data transfer.
13394  if(pDefragCtx->res == VK_SUCCESS)
13395  {
13396  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13397  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13398 
13399  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13400  {
13401  const VmaDefragmentationMove& move = moves[moveIndex];
13402 
13403  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13404  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13405 
13406  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13407 
13408  // Invalidate source.
13409  if(isNonCoherent)
13410  {
13411  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13412  memRange.memory = pSrcBlock->GetDeviceMemory();
13413  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13414  memRange.size = VMA_MIN(
13415  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13416  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13417  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13418  }
13419 
13420  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13421  memmove(
13422  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13423  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13424  static_cast<size_t>(move.size));
13425 
13426  if(IsCorruptionDetectionEnabled())
13427  {
13428  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13429  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13430  }
13431 
13432  // Flush destination.
13433  if(isNonCoherent)
13434  {
13435  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13436  memRange.memory = pDstBlock->GetDeviceMemory();
13437  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13438  memRange.size = VMA_MIN(
13439  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13440  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13441  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13442  }
13443  }
13444  }
13445 
13446  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
13447  // This is done regardless of whether pCtx->res == VK_SUCCESS.
13448  for(size_t blockIndex = blockCount; blockIndex--; )
13449  {
13450  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13451  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13452  {
13453  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13454  pBlock->Unmap(m_hAllocator, 1);
13455  }
13456  }
13457 }
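
/*
Worked example of the non-coherent range rounding above (illustrative numbers):
with nonCoherentAtomSize = 64, a move with srcOffset = 200 and size = 100 gives

    memRange.offset = VmaAlignDown(200, 64) = 192
    memRange.size   = VmaAlignUp(100 + (200 - 192), 64) = VmaAlignUp(108, 64) = 128

clamped so it does not run past the end of the block. Vulkan requires
flush/invalidate ranges on non-coherent memory types to be aligned like this.
*/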
13458 
13459 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13460  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13461  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13462  VkCommandBuffer commandBuffer)
13463 {
13464  const size_t blockCount = m_Blocks.size();
13465 
13466  pDefragCtx->blockContexts.resize(blockCount);
13467  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13468 
13469  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13470  const size_t moveCount = moves.size();
13471  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13472  {
13473  const VmaDefragmentationMove& move = moves[moveIndex];
13474 
13475  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13476  {
13477  // Old-school moves still require us to map the whole block.
13478  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13479  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13480  }
13481  }
13482 
13483  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13484 
13485  // Go over all blocks. Create and bind buffer for whole block if necessary.
13486  {
13487  VkBufferCreateInfo bufCreateInfo;
13488  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13489 
13490  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13491  {
13492  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13493  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13494  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13495  {
13496  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13497  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13498  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13499  if(pDefragCtx->res == VK_SUCCESS)
13500  {
13501  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13502  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13503  }
13504  }
13505  }
13506  }
13507 
13508  // Go over all moves. Post data transfer commands to command buffer.
13509  if(pDefragCtx->res == VK_SUCCESS)
13510  {
13511  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13512  {
13513  const VmaDefragmentationMove& move = moves[moveIndex];
13514 
13515  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13516  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13517 
13518  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13519 
13520  VkBufferCopy region = {
13521  move.srcOffset,
13522  move.dstOffset,
13523  move.size };
13524  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13525  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13526  }
13527  }
13528 
13529  // Save buffers to defrag context for later destruction.
13530  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13531  {
13532  pDefragCtx->res = VK_NOT_READY;
13533  }
13534 }
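
/*
Setting pCtx->res to VK_NOT_READY here signals that the copy commands recorded
into commandBuffer still have to be submitted and completed by the caller. The
temporary whole-block buffers remain in pCtx->blockContexts and are destroyed
later, in DefragmentationEnd().
*/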
13535 
13536 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13537 {
13538  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13539  {
13540  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13541  if(pBlock->m_pMetadata->IsEmpty())
13542  {
13543  if(m_Blocks.size() > m_MinBlockCount)
13544  {
13545  if(pDefragmentationStats != VMA_NULL)
13546  {
13547  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13548  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13549  }
13550 
13551  VmaVectorRemove(m_Blocks, blockIndex);
13552  pBlock->Destroy(m_hAllocator);
13553  vma_delete(m_hAllocator, pBlock);
13554  }
13555  else
13556  {
13557  break;
13558  }
13559  }
13560  }
13561  UpdateHasEmptyBlock();
13562 }
13563 
13564 void VmaBlockVector::UpdateHasEmptyBlock()
13565 {
13566  m_HasEmptyBlock = false;
13567  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13568  {
13569  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13570  if(pBlock->m_pMetadata->IsEmpty())
13571  {
13572  m_HasEmptyBlock = true;
13573  break;
13574  }
13575  }
13576 }
13577 
13578 #if VMA_STATS_STRING_ENABLED
13579 
13580 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13581 {
13582  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13583 
13584  json.BeginObject();
13585 
13586  if(IsCustomPool())
13587  {
13588  const char* poolName = m_hParentPool->GetName();
13589  if(poolName != VMA_NULL && poolName[0] != '\0')
13590  {
13591  json.WriteString("Name");
13592  json.WriteString(poolName);
13593  }
13594 
13595  json.WriteString("MemoryTypeIndex");
13596  json.WriteNumber(m_MemoryTypeIndex);
13597 
13598  json.WriteString("BlockSize");
13599  json.WriteNumber(m_PreferredBlockSize);
13600 
13601  json.WriteString("BlockCount");
13602  json.BeginObject(true);
13603  if(m_MinBlockCount > 0)
13604  {
13605  json.WriteString("Min");
13606  json.WriteNumber((uint64_t)m_MinBlockCount);
13607  }
13608  if(m_MaxBlockCount < SIZE_MAX)
13609  {
13610  json.WriteString("Max");
13611  json.WriteNumber((uint64_t)m_MaxBlockCount);
13612  }
13613  json.WriteString("Cur");
13614  json.WriteNumber((uint64_t)m_Blocks.size());
13615  json.EndObject();
13616 
13617  if(m_FrameInUseCount > 0)
13618  {
13619  json.WriteString("FrameInUseCount");
13620  json.WriteNumber(m_FrameInUseCount);
13621  }
13622 
13623  if(m_Algorithm != 0)
13624  {
13625  json.WriteString("Algorithm");
13626  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13627  }
13628  }
13629  else
13630  {
13631  json.WriteString("PreferredBlockSize");
13632  json.WriteNumber(m_PreferredBlockSize);
13633  }
13634 
13635  json.WriteString("Blocks");
13636  json.BeginObject();
13637  for(size_t i = 0; i < m_Blocks.size(); ++i)
13638  {
13639  json.BeginString();
13640  json.ContinueString(m_Blocks[i]->GetId());
13641  json.EndString();
13642 
13643  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13644  }
13645  json.EndObject();
13646 
13647  json.EndObject();
13648 }
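
/*
Shape of the JSON emitted above for a custom pool (an illustrative sketch;
names and values are hypothetical):

    {
      "Name": "MyPool",
      "MemoryTypeIndex": 2,
      "BlockSize": 268435456,
      "BlockCount": { "Min": 1, "Max": 8, "Cur": 2 },
      "Algorithm": "Linear",
      "Blocks": { "0": { ... }, "1": { ... } }
    }

For default pools, only "PreferredBlockSize" and "Blocks" are written.
*/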
13649 
13650 #endif // #if VMA_STATS_STRING_ENABLED
13651 
13652 void VmaBlockVector::Defragment(
13653  class VmaBlockVectorDefragmentationContext* pCtx,
13654  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
13655  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13656  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13657  VkCommandBuffer commandBuffer)
13658 {
13659  pCtx->res = VK_SUCCESS;
13660 
13661  const VkMemoryPropertyFlags memPropFlags =
13662  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13663  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13664 
13665  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13666  isHostVisible;
13667  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13668  !IsCorruptionDetectionEnabled() &&
13669  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13670 
13671  // There are options to defragment this memory type.
13672  if(canDefragmentOnCpu || canDefragmentOnGpu)
13673  {
13674  bool defragmentOnGpu;
13675  // There is only one option to defragment this memory type.
13676  if(canDefragmentOnGpu != canDefragmentOnCpu)
13677  {
13678  defragmentOnGpu = canDefragmentOnGpu;
13679  }
13680  // Both options are available: use heuristics to choose the best one.
13681  else
13682  {
13683  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13684  m_hAllocator->IsIntegratedGpu();
13685  }
13686 
13687  bool overlappingMoveSupported = !defragmentOnGpu;
13688 
13689  if(m_hAllocator->m_UseMutex)
13690  {
13691  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13692  {
13693  if(!m_Mutex.TryLockWrite())
13694  {
13695  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13696  return;
13697  }
13698  }
13699  else
13700  {
13701  m_Mutex.LockWrite();
13702  pCtx->mutexLocked = true;
13703  }
13704  }
13705 
13706  pCtx->Begin(overlappingMoveSupported, flags);
13707 
13708  // Defragment.
13709 
13710  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13711  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13712  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13713 
13714  // Accumulate statistics.
13715  if(pStats != VMA_NULL)
13716  {
13717  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13718  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13719  pStats->bytesMoved += bytesMoved;
13720  pStats->allocationsMoved += allocationsMoved;
13721  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13722  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13723  if(defragmentOnGpu)
13724  {
13725  maxGpuBytesToMove -= bytesMoved;
13726  maxGpuAllocationsToMove -= allocationsMoved;
13727  }
13728  else
13729  {
13730  maxCpuBytesToMove -= bytesMoved;
13731  maxCpuAllocationsToMove -= allocationsMoved;
13732  }
13733  }
13734 
13735  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13736  {
13737  if(m_hAllocator->m_UseMutex)
13738  m_Mutex.UnlockWrite();
13739 
13740  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13741  pCtx->res = VK_NOT_READY;
13742 
13743  return;
13744  }
13745 
13746  if(pCtx->res >= VK_SUCCESS)
13747  {
13748  if(defragmentOnGpu)
13749  {
13750  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13751  }
13752  else
13753  {
13754  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13755  }
13756  }
13757  }
13758 }
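// Editorial summary of Defragment() above: when both the CPU and the GPU path are
// viable, the heuristic picks the GPU path for DEVICE_LOCAL memory types and on
// integrated GPUs. With VMA_DEFRAGMENTATION_FLAG_INCREMENTAL the function returns
// early, leaving pCtx->res == VK_NOT_READY while computed moves are still waiting
// to be applied in later passes.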
13759 
13760 void VmaBlockVector::DefragmentationEnd(
13761  class VmaBlockVectorDefragmentationContext* pCtx,
13762  uint32_t flags,
13763  VmaDefragmentationStats* pStats)
13764 {
13765  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
13766  {
13767  VMA_ASSERT(pCtx->mutexLocked == false);
13768 
13769  // Incremental defragmentation doesn't hold the lock, so when we enter here no
13770  // lock is protecting us yet. Since we mutate state here, we have to take the lock now.
13771  m_Mutex.LockWrite();
13772  pCtx->mutexLocked = true;
13773  }
13774 
13775  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
13776  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
13777  {
13778  // Destroy buffers.
13779  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
13780  {
13781  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
13782  if(blockCtx.hBuffer)
13783  {
13784  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13785  }
13786  }
13787 
13788  if(pCtx->res >= VK_SUCCESS)
13789  {
13790  FreeEmptyBlocks(pStats);
13791  }
13792  }
13793 
13794  if(pCtx->mutexLocked)
13795  {
13796  VMA_ASSERT(m_hAllocator->m_UseMutex);
13797  m_Mutex.UnlockWrite();
13798  }
13799 }
13800 
13801 uint32_t VmaBlockVector::ProcessDefragmentations(
13802  class VmaBlockVectorDefragmentationContext *pCtx,
13803  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13804 {
13805  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13806 
13807  const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13808 
13809  for(uint32_t i = 0; i < moveCount; ++ i)
13810  {
13811  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13812 
13813  pMove->allocation = move.hAllocation;
13814  pMove->memory = move.pDstBlock->GetDeviceMemory();
13815  pMove->offset = move.dstOffset;
13816 
13817  ++ pMove;
13818  }
13819 
13820  pCtx->defragmentationMovesProcessed += moveCount;
13821 
13822  return moveCount;
13823 }
13824 
13825 void VmaBlockVector::CommitDefragmentations(
13826  class VmaBlockVectorDefragmentationContext *pCtx,
13827  VmaDefragmentationStats* pStats)
13828 {
13829  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13830 
13831  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13832  {
13833  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13834 
13835  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13836  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13837  }
13838 
13839  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13840  FreeEmptyBlocks(pStats);
13841 }
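// Editorial note: ProcessDefragmentations() only hands moves out to the caller
// (advancing defragmentationMovesProcessed), while CommitDefragmentations()
// applies them to the block metadata (advancing defragmentationMovesCommitted)
// once the caller has performed the actual memory copies.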
13842 
13843 size_t VmaBlockVector::CalcAllocationCount() const
13844 {
13845  size_t result = 0;
13846  for(size_t i = 0; i < m_Blocks.size(); ++i)
13847  {
13848  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13849  }
13850  return result;
13851 }
13852 
13853 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13854 {
13855  if(m_BufferImageGranularity == 1)
13856  {
13857  return false;
13858  }
13859  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13860  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13861  {
13862  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13863  VMA_ASSERT(m_Algorithm == 0);
13864  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13865  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13866  {
13867  return true;
13868  }
13869  }
13870  return false;
13871 }
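// Background (a general Vulkan rule, not specific to this file): the device's
// bufferImageGranularity is the granularity at which linear resources (buffers,
// linearly tiled images) and non-linear resources (optimally tiled images) must
// be separated within one VkDeviceMemory. A conflict is thus only possible when
// the granularity is greater than 1 and suballocations of both kinds can become
// adjacent, which is what the check above conservatively detects.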
13872 
13873 void VmaBlockVector::MakePoolAllocationsLost(
13874  uint32_t currentFrameIndex,
13875  size_t* pLostAllocationCount)
13876 {
13877  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13878  size_t lostAllocationCount = 0;
13879  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13880  {
13881  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13882  VMA_ASSERT(pBlock);
13883  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13884  }
13885  if(pLostAllocationCount != VMA_NULL)
13886  {
13887  *pLostAllocationCount = lostAllocationCount;
13888  }
13889 }
13890 
13891 VkResult VmaBlockVector::CheckCorruption()
13892 {
13893  if(!IsCorruptionDetectionEnabled())
13894  {
13895  return VK_ERROR_FEATURE_NOT_PRESENT;
13896  }
13897 
13898  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13899  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13900  {
13901  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13902  VMA_ASSERT(pBlock);
13903  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13904  if(res != VK_SUCCESS)
13905  {
13906  return res;
13907  }
13908  }
13909  return VK_SUCCESS;
13910 }
13911 
13912 void VmaBlockVector::AddStats(VmaStats* pStats)
13913 {
13914  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13915  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13916 
13917  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13918 
13919  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13920  {
13921  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13922  VMA_ASSERT(pBlock);
13923  VMA_HEAVY_ASSERT(pBlock->Validate());
13924  VmaStatInfo allocationStatInfo;
13925  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13926  VmaAddStatInfo(pStats->total, allocationStatInfo);
13927  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13928  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13929  }
13930 }
13931 
13932 ////////////////////////////////////////////////////////////////////////////////
13933 // VmaDefragmentationAlgorithm_Generic members definition
13934 
13935 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13936  VmaAllocator hAllocator,
13937  VmaBlockVector* pBlockVector,
13938  uint32_t currentFrameIndex,
13939  bool overlappingMoveSupported) :
13940  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13941  m_AllocationCount(0),
13942  m_AllAllocations(false),
13943  m_BytesMoved(0),
13944  m_AllocationsMoved(0),
13945  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13946 {
13947  // Create block info for each block.
13948  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13949  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13950  {
13951  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13952  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13953  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13954  m_Blocks.push_back(pBlockInfo);
13955  }
13956 
13957  // Sort them by m_pBlock pointer value.
13958  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13959 }
13960 
13961 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13962 {
13963  for(size_t i = m_Blocks.size(); i--; )
13964  {
13965  vma_delete(m_hAllocator, m_Blocks[i]);
13966  }
13967 }
13968 
13969 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13970 {
13971  // Now that we are inside VmaBlockVector::m_Mutex, we can make the final check that this allocation was not lost.
13972  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13973  {
13974  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13975  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13976  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13977  {
13978  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13979  (*it)->m_Allocations.push_back(allocInfo);
13980  }
13981  else
13982  {
13983  VMA_ASSERT(0);
13984  }
13985 
13986  ++m_AllocationCount;
13987  }
13988 }
13989 
13990 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13991  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13992  VkDeviceSize maxBytesToMove,
13993  uint32_t maxAllocationsToMove,
13994  bool freeOldAllocations)
13995 {
13996  if(m_Blocks.empty())
13997  {
13998  return VK_SUCCESS;
13999  }
14000 
14001  // This is a choice based on research.
14002  // Option 1:
14003  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
14004  // Option 2:
14005  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
14006  // Option 3:
14007  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
14008 
14009  size_t srcBlockMinIndex = 0;
14010  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
14011  /*
14012  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
14013  {
14014  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
14015  if(blocksWithNonMovableCount > 0)
14016  {
14017  srcBlockMinIndex = blocksWithNonMovableCount - 1;
14018  }
14019  }
14020  */
14021 
14022  size_t srcBlockIndex = m_Blocks.size() - 1;
14023  size_t srcAllocIndex = SIZE_MAX;
14024  for(;;)
14025  {
14026  // 1. Find next allocation to move.
14027  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
14028  // 1.2. Then start from last to first m_Allocations.
14029  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
14030  {
14031  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
14032  {
14033  // Finished: no more allocations to process.
14034  if(srcBlockIndex == srcBlockMinIndex)
14035  {
14036  return VK_SUCCESS;
14037  }
14038  else
14039  {
14040  --srcBlockIndex;
14041  srcAllocIndex = SIZE_MAX;
14042  }
14043  }
14044  else
14045  {
14046  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
14047  }
14048  }
14049 
14050  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
14051  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
14052 
14053  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
14054  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
14055  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
14056  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
14057 
14058  // 2. Try to find new place for this allocation in preceding or current block.
14059  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
14060  {
14061  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
14062  VmaAllocationRequest dstAllocRequest;
14063  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
14064  m_CurrentFrameIndex,
14065  m_pBlockVector->GetFrameInUseCount(),
14066  m_pBlockVector->GetBufferImageGranularity(),
14067  size,
14068  alignment,
14069  false, // upperAddress
14070  suballocType,
14071  false, // canMakeOtherLost
14072  strategy,
14073  &dstAllocRequest) &&
14074  MoveMakesSense(
14075  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
14076  {
14077  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
14078 
14079  // Reached limit on number of allocations or bytes to move.
14080  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
14081  (m_BytesMoved + size > maxBytesToMove))
14082  {
14083  return VK_SUCCESS;
14084  }
14085 
14086  VmaDefragmentationMove move = {};
14087  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
14088  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
14089  move.srcOffset = srcOffset;
14090  move.dstOffset = dstAllocRequest.offset;
14091  move.size = size;
14092  move.hAllocation = allocInfo.m_hAllocation;
14093  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
14094  move.pDstBlock = pDstBlockInfo->m_pBlock;
14095 
14096  moves.push_back(move);
14097 
14098  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
14099  dstAllocRequest,
14100  suballocType,
14101  size,
14102  allocInfo.m_hAllocation);
14103 
14104  if(freeOldAllocations)
14105  {
14106  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
14107  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
14108  }
14109 
14110  if(allocInfo.m_pChanged != VMA_NULL)
14111  {
14112  *allocInfo.m_pChanged = VK_TRUE;
14113  }
14114 
14115  ++m_AllocationsMoved;
14116  m_BytesMoved += size;
14117 
14118  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
14119 
14120  break;
14121  }
14122  }
14123 
14124  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
14125 
14126  if(srcAllocIndex > 0)
14127  {
14128  --srcAllocIndex;
14129  }
14130  else
14131  {
14132  if(srcBlockIndex > 0)
14133  {
14134  --srcBlockIndex;
14135  srcAllocIndex = SIZE_MAX;
14136  }
14137  else
14138  {
14139  return VK_SUCCESS;
14140  }
14141  }
14142  }
14143 }
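// Editorial summary: each round walks allocations from the most "source" block
// backwards and tries to repack every allocation into the earliest block (and
// lowest offset) where it fits, returning as soon as one more move would exceed
// maxBytesToMove or maxAllocationsToMove.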
14144 
14145 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
14146 {
14147  size_t result = 0;
14148  for(size_t i = 0; i < m_Blocks.size(); ++i)
14149  {
14150  if(m_Blocks[i]->m_HasNonMovableAllocations)
14151  {
14152  ++result;
14153  }
14154  }
14155  return result;
14156 }
14157 
14158 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
14159  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14160  VkDeviceSize maxBytesToMove,
14161  uint32_t maxAllocationsToMove,
14162  VmaDefragmentationFlags flags)
14163 {
14164  if(!m_AllAllocations && m_AllocationCount == 0)
14165  {
14166  return VK_SUCCESS;
14167  }
14168 
14169  const size_t blockCount = m_Blocks.size();
14170  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14171  {
14172  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
14173 
14174  if(m_AllAllocations)
14175  {
14176  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
14177  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
14178  it != pMetadata->m_Suballocations.end();
14179  ++it)
14180  {
14181  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
14182  {
14183  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
14184  pBlockInfo->m_Allocations.push_back(allocInfo);
14185  }
14186  }
14187  }
14188 
14189  pBlockInfo->CalcHasNonMovableAllocations();
14190 
14191  // This is a choice based on research.
14192  // Option 1:
14193  pBlockInfo->SortAllocationsByOffsetDescending();
14194  // Option 2:
14195  //pBlockInfo->SortAllocationsBySizeDescending();
14196  }
14197 
14198  // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
14199  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14200 
14201  // This is a choice based on research.
14202  const uint32_t roundCount = 2;
14203 
14204  // Execute defragmentation rounds (the main part).
14205  VkResult result = VK_SUCCESS;
14206  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14207  {
14208  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14209  }
14210 
14211  return result;
14212 }
14213 
14214 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14215  size_t dstBlockIndex, VkDeviceSize dstOffset,
14216  size_t srcBlockIndex, VkDeviceSize srcOffset)
14217 {
14218  if(dstBlockIndex < srcBlockIndex)
14219  {
14220  return true;
14221  }
14222  if(dstBlockIndex > srcBlockIndex)
14223  {
14224  return false;
14225  }
14226  if(dstOffset < srcOffset)
14227  {
14228  return true;
14229  }
14230  return false;
14231 }
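// Worked example for MoveMakesSense(): with m_Blocks sorted from most
// "destination" to most "source", a move from block 2 to block 0 always makes
// sense, a move within the same block makes sense only towards a lower offset,
// and a move to a later block never does.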
14232 
14233 ////////////////////////////////////////////////////////////////////////////////
14234 // VmaDefragmentationAlgorithm_Fast
14235 
14236 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14237  VmaAllocator hAllocator,
14238  VmaBlockVector* pBlockVector,
14239  uint32_t currentFrameIndex,
14240  bool overlappingMoveSupported) :
14241  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14242  m_OverlappingMoveSupported(overlappingMoveSupported),
14243  m_AllocationCount(0),
14244  m_AllAllocations(false),
14245  m_BytesMoved(0),
14246  m_AllocationsMoved(0),
14247  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14248 {
14249  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14250 
14251 }
14252 
14253 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14254 {
14255 }
14256 
14257 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14258  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14259  VkDeviceSize maxBytesToMove,
14260  uint32_t maxAllocationsToMove,
14261  VmaDefragmentationFlags flags)
14262 {
14263  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14264 
14265  const size_t blockCount = m_pBlockVector->GetBlockCount();
14266  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14267  {
14268  return VK_SUCCESS;
14269  }
14270 
14271  PreprocessMetadata();
14272 
14273  // Sort blocks in order from most "destination" to most "source", i.e. by ascending sum of free size.
14274 
14275  m_BlockInfos.resize(blockCount);
14276  for(size_t i = 0; i < blockCount; ++i)
14277  {
14278  m_BlockInfos[i].origBlockIndex = i;
14279  }
14280 
14281  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14282  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14283  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14284  });
14285 
14286  // THE MAIN ALGORITHM
14287 
14288  FreeSpaceDatabase freeSpaceDb;
14289 
14290  size_t dstBlockInfoIndex = 0;
14291  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14292  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14293  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14294  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14295  VkDeviceSize dstOffset = 0;
14296 
14297  bool end = false;
14298  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14299  {
14300  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14301  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14302  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14303  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14304  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14305  {
14306  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14307  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14308  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14309  if(m_AllocationsMoved == maxAllocationsToMove ||
14310  m_BytesMoved + srcAllocSize > maxBytesToMove)
14311  {
14312  end = true;
14313  break;
14314  }
14315  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14316 
14317  VmaDefragmentationMove move = {};
14318  // Try to place it in one of free spaces from the database.
14319  size_t freeSpaceInfoIndex;
14320  VkDeviceSize dstAllocOffset;
14321  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14322  freeSpaceInfoIndex, dstAllocOffset))
14323  {
14324  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14325  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14326  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14327 
14328  // Same block
14329  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14330  {
14331  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14332 
14333  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14334 
14335  VmaSuballocation suballoc = *srcSuballocIt;
14336  suballoc.offset = dstAllocOffset;
14337  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14338  m_BytesMoved += srcAllocSize;
14339  ++m_AllocationsMoved;
14340 
14341  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14342  ++nextSuballocIt;
14343  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14344  srcSuballocIt = nextSuballocIt;
14345 
14346  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14347 
14348  move.srcBlockIndex = srcOrigBlockIndex;
14349  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14350  move.srcOffset = srcAllocOffset;
14351  move.dstOffset = dstAllocOffset;
14352  move.size = srcAllocSize;
14353 
14354  moves.push_back(move);
14355  }
14356  // Different block
14357  else
14358  {
14359  // MOVE OPTION 2: Move the allocation to a different block.
14360 
14361  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14362 
14363  VmaSuballocation suballoc = *srcSuballocIt;
14364  suballoc.offset = dstAllocOffset;
14365  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14366  m_BytesMoved += srcAllocSize;
14367  ++m_AllocationsMoved;
14368 
14369  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14370  ++nextSuballocIt;
14371  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14372  srcSuballocIt = nextSuballocIt;
14373 
14374  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14375 
14376  move.srcBlockIndex = srcOrigBlockIndex;
14377  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14378  move.srcOffset = srcAllocOffset;
14379  move.dstOffset = dstAllocOffset;
14380  move.size = srcAllocSize;
14381 
14382  moves.push_back(move);
14383  }
14384  }
14385  else
14386  {
14387  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14388 
14389  // If the allocation doesn't fit before the end of dstBlock, forward to next block.
14390  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14391  dstAllocOffset + srcAllocSize > dstBlockSize)
14392  {
14393  // But before that, register remaining free space at the end of dst block.
14394  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14395 
14396  ++dstBlockInfoIndex;
14397  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14398  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14399  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14400  dstBlockSize = pDstMetadata->GetSize();
14401  dstOffset = 0;
14402  dstAllocOffset = 0;
14403  }
14404 
14405  // Same block
14406  if(dstBlockInfoIndex == srcBlockInfoIndex)
14407  {
14408  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14409 
14410  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14411 
14412  bool skipOver = overlap;
14413  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14414  {
14415  // If the destination and source regions overlap, skip the move if it would
14416  // shift the allocation by less than 1/64 of its size.
14417  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14418  }
14419 
14420  if(skipOver)
14421  {
14422  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14423 
14424  dstOffset = srcAllocOffset + srcAllocSize;
14425  ++srcSuballocIt;
14426  }
14427  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14428  else
14429  {
14430  srcSuballocIt->offset = dstAllocOffset;
14431  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14432  dstOffset = dstAllocOffset + srcAllocSize;
14433  m_BytesMoved += srcAllocSize;
14434  ++m_AllocationsMoved;
14435  ++srcSuballocIt;
14436 
14437  move.srcBlockIndex = srcOrigBlockIndex;
14438  move.dstBlockIndex = dstOrigBlockIndex;
14439  move.srcOffset = srcAllocOffset;
14440  move.dstOffset = dstAllocOffset;
14441  move.size = srcAllocSize;
14442 
14443  moves.push_back(move);
14444  }
14445  }
14446  // Different block
14447  else
14448  {
14449  // MOVE OPTION 2: Move the allocation to a different block.
14450 
14451  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14452  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14453 
14454  VmaSuballocation suballoc = *srcSuballocIt;
14455  suballoc.offset = dstAllocOffset;
14456  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14457  dstOffset = dstAllocOffset + srcAllocSize;
14458  m_BytesMoved += srcAllocSize;
14459  ++m_AllocationsMoved;
14460 
14461  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14462  ++nextSuballocIt;
14463  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14464  srcSuballocIt = nextSuballocIt;
14465 
14466  pDstMetadata->m_Suballocations.push_back(suballoc);
14467 
14468  move.srcBlockIndex = srcOrigBlockIndex;
14469  move.dstBlockIndex = dstOrigBlockIndex;
14470  move.srcOffset = srcAllocOffset;
14471  move.dstOffset = dstAllocOffset;
14472  move.size = srcAllocSize;
14473 
14474  moves.push_back(move);
14475  }
14476  }
14477  }
14478  }
14479 
14480  m_BlockInfos.clear();
14481 
14482  PostprocessMetadata();
14483 
14484  return VK_SUCCESS;
14485 }
14486 
14487 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14488 {
14489  const size_t blockCount = m_pBlockVector->GetBlockCount();
14490  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14491  {
14492  VmaBlockMetadata_Generic* const pMetadata =
14493  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14494  pMetadata->m_FreeCount = 0;
14495  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14496  pMetadata->m_FreeSuballocationsBySize.clear();
14497  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14498  it != pMetadata->m_Suballocations.end(); )
14499  {
14500  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14501  {
14502  VmaSuballocationList::iterator nextIt = it;
14503  ++nextIt;
14504  pMetadata->m_Suballocations.erase(it);
14505  it = nextIt;
14506  }
14507  else
14508  {
14509  ++it;
14510  }
14511  }
14512  }
14513 }
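// Editorial note: PreprocessMetadata() strips every VMA_SUBALLOCATION_TYPE_FREE
// entry, so while the fast algorithm runs each block's suballocation list holds
// only real allocations. PostprocessMetadata() below re-derives the free entries,
// m_FreeCount, m_SumFreeSize, and the by-size index from the gaps between them.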
14514 
14515 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14516 {
14517  const size_t blockCount = m_pBlockVector->GetBlockCount();
14518  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14519  {
14520  VmaBlockMetadata_Generic* const pMetadata =
14521  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14522  const VkDeviceSize blockSize = pMetadata->GetSize();
14523 
14524  // No allocations in this block - entire area is free.
14525  if(pMetadata->m_Suballocations.empty())
14526  {
14527  pMetadata->m_FreeCount = 1;
14528  //pMetadata->m_SumFreeSize is already set to blockSize.
14529  VmaSuballocation suballoc = {
14530  0, // offset
14531  blockSize, // size
14532  VMA_NULL, // hAllocation
14533  VMA_SUBALLOCATION_TYPE_FREE };
14534  pMetadata->m_Suballocations.push_back(suballoc);
14535  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14536  }
14537  // There are some allocations in this block.
14538  else
14539  {
14540  VkDeviceSize offset = 0;
14541  VmaSuballocationList::iterator it;
14542  for(it = pMetadata->m_Suballocations.begin();
14543  it != pMetadata->m_Suballocations.end();
14544  ++it)
14545  {
14546  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14547  VMA_ASSERT(it->offset >= offset);
14548 
14549  // Need to insert preceding free space.
14550  if(it->offset > offset)
14551  {
14552  ++pMetadata->m_FreeCount;
14553  const VkDeviceSize freeSize = it->offset - offset;
14554  VmaSuballocation suballoc = {
14555  offset, // offset
14556  freeSize, // size
14557  VMA_NULL, // hAllocation
14558  VMA_SUBALLOCATION_TYPE_FREE };
14559  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14560  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14561  {
14562  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14563  }
14564  }
14565 
14566  pMetadata->m_SumFreeSize -= it->size;
14567  offset = it->offset + it->size;
14568  }
14569 
14570  // Need to insert trailing free space.
14571  if(offset < blockSize)
14572  {
14573  ++pMetadata->m_FreeCount;
14574  const VkDeviceSize freeSize = blockSize - offset;
14575  VmaSuballocation suballoc = {
14576  offset, // offset
14577  freeSize, // size
14578  VMA_NULL, // hAllocation
14579  VMA_SUBALLOCATION_TYPE_FREE };
14580  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14581  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14582  if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14583  {
14584  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14585  }
14586  }
14587 
14588  VMA_SORT(
14589  pMetadata->m_FreeSuballocationsBySize.begin(),
14590  pMetadata->m_FreeSuballocationsBySize.end(),
14591  VmaSuballocationItemSizeLess());
14592  }
14593 
14594  VMA_HEAVY_ASSERT(pMetadata->Validate());
14595  }
14596 }
14597 
14598 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14599 {
14600  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14601  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14602  // Find the first suballocation at or past suballoc.offset - the new
14603  // suballocation is inserted in front of it.
14604  while(it != pMetadata->m_Suballocations.end() &&
14605  it->offset < suballoc.offset)
14606  {
14607  ++it;
14608  }
14609  pMetadata->m_Suballocations.insert(it, suballoc);
14610 }
14611 
14612 ////////////////////////////////////////////////////////////////////////////////
14613 // VmaBlockVectorDefragmentationContext
14614 
14615 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14616  VmaAllocator hAllocator,
14617  VmaPool hCustomPool,
14618  VmaBlockVector* pBlockVector,
14619  uint32_t currFrameIndex) :
14620  res(VK_SUCCESS),
14621  mutexLocked(false),
14622  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14623  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14624  defragmentationMovesProcessed(0),
14625  defragmentationMovesCommitted(0),
14626  hasDefragmentationPlan(0),
14627  m_hAllocator(hAllocator),
14628  m_hCustomPool(hCustomPool),
14629  m_pBlockVector(pBlockVector),
14630  m_CurrFrameIndex(currFrameIndex),
14631  m_pAlgorithm(VMA_NULL),
14632  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14633  m_AllAllocations(false)
14634 {
14635 }
14636 
14637 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14638 {
14639  vma_delete(m_hAllocator, m_pAlgorithm);
14640 }
14641 
14642 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14643 {
14644  AllocInfo info = { hAlloc, pChanged };
14645  m_Allocations.push_back(info);
14646 }
14647 
14648 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14649 {
14650  const bool allAllocations = m_AllAllocations ||
14651  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14652 
14653  /********************************
14654  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14655  ********************************/
14656 
14657  /*
14658  Fast algorithm is supported only when certain criteria are met:
14659  - VMA_DEBUG_MARGIN is 0.
14660  - All allocations in this block vector are moveable.
14661  - There is no possibility of image/buffer granularity conflict.
14662  - The defragmentation is not incremental.
14663  */
14664  if(VMA_DEBUG_MARGIN == 0 &&
14665  allAllocations &&
14666  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14667  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
14668  {
14669  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14670  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14671  }
14672  else
14673  {
14674  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14675  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14676  }
14677 
14678  if(allAllocations)
14679  {
14680  m_pAlgorithm->AddAll();
14681  }
14682  else
14683  {
14684  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14685  {
14686  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14687  }
14688  }
14689 }
14690 
14691 ////////////////////////////////////////////////////////////////////////////////
14692 // VmaDefragmentationContext
14693 
14694 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14695  VmaAllocator hAllocator,
14696  uint32_t currFrameIndex,
14697  uint32_t flags,
14698  VmaDefragmentationStats* pStats) :
14699  m_hAllocator(hAllocator),
14700  m_CurrFrameIndex(currFrameIndex),
14701  m_Flags(flags),
14702  m_pStats(pStats),
14703  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14704 {
14705  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14706 }
14707 
14708 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14709 {
14710  for(size_t i = m_CustomPoolContexts.size(); i--; )
14711  {
14712  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14713  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14714  vma_delete(m_hAllocator, pBlockVectorCtx);
14715  }
14716  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14717  {
14718  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14719  if(pBlockVectorCtx)
14720  {
14721  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14722  vma_delete(m_hAllocator, pBlockVectorCtx);
14723  }
14724  }
14725 }
14726 
14727 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
14728 {
14729  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14730  {
14731  VmaPool pool = pPools[poolIndex];
14732  VMA_ASSERT(pool);
14733  // Pools with an algorithm other than the default are not defragmented.
14734  if(pool->m_BlockVector.GetAlgorithm() == 0)
14735  {
14736  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14737 
14738  for(size_t i = m_CustomPoolContexts.size(); i--; )
14739  {
14740  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14741  {
14742  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14743  break;
14744  }
14745  }
14746 
14747  if(!pBlockVectorDefragCtx)
14748  {
14749  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14750  m_hAllocator,
14751  pool,
14752  &pool->m_BlockVector,
14753  m_CurrFrameIndex);
14754  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14755  }
14756 
14757  pBlockVectorDefragCtx->AddAll();
14758  }
14759  }
14760 }
14761 
14762 void VmaDefragmentationContext_T::AddAllocations(
14763  uint32_t allocationCount,
14764  const VmaAllocation* pAllocations,
14765  VkBool32* pAllocationsChanged)
14766 {
14767  // Dispatch pAllocations among defragmentation contexts. Create them when necessary.
14768  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14769  {
14770  const VmaAllocation hAlloc = pAllocations[allocIndex];
14771  VMA_ASSERT(hAlloc);
14772  // DedicatedAlloc cannot be defragmented.
14773  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14774  // Lost allocation cannot be defragmented.
14775  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14776  {
14777  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14778 
14779  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14780  // This allocation belongs to a custom pool.
14781  if(hAllocPool != VK_NULL_HANDLE)
14782  {
14783  // Pools with an algorithm other than the default are not defragmented.
14784  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14785  {
14786  for(size_t i = m_CustomPoolContexts.size(); i--; )
14787  {
14788  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14789  {
14790  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14791  break;
14792  }
14793  }
14794  if(!pBlockVectorDefragCtx)
14795  {
14796  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14797  m_hAllocator,
14798  hAllocPool,
14799  &hAllocPool->m_BlockVector,
14800  m_CurrFrameIndex);
14801  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14802  }
14803  }
14804  }
14805  // This allocation belongs to the default pool.
14806  else
14807  {
14808  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14809  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14810  if(!pBlockVectorDefragCtx)
14811  {
14812  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14813  m_hAllocator,
14814  VMA_NULL, // hCustomPool
14815  m_hAllocator->m_pBlockVectors[memTypeIndex],
14816  m_CurrFrameIndex);
14817  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14818  }
14819  }
14820 
14821  if(pBlockVectorDefragCtx)
14822  {
14823  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14824  &pAllocationsChanged[allocIndex] : VMA_NULL;
14825  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14826  }
14827  }
14828  }
14829 }
14830 
14831 VkResult VmaDefragmentationContext_T::Defragment(
14832  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14833  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14834  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14835 {
14836  if(pStats)
14837  {
14838  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14839  }
14840 
14841  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14842  {
14843  // For incremental defragmentations, we just earmark how much we can move.
14844  // The real meat is in the defragmentation steps.
14845  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14846  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14847 
14848  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14849  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14850 
14851  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14852  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14853  return VK_SUCCESS;
14854 
14855  return VK_NOT_READY;
14856  }
14857 
14858  if(commandBuffer == VK_NULL_HANDLE)
14859  {
14860  maxGpuBytesToMove = 0;
14861  maxGpuAllocationsToMove = 0;
14862  }
14863 
14864  VkResult res = VK_SUCCESS;
14865 
14866  // Process default pools.
14867  for(uint32_t memTypeIndex = 0;
14868  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14869  ++memTypeIndex)
14870  {
14871  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14872  if(pBlockVectorCtx)
14873  {
14874  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14875  pBlockVectorCtx->GetBlockVector()->Defragment(
14876  pBlockVectorCtx,
14877  pStats, flags,
14878  maxCpuBytesToMove, maxCpuAllocationsToMove,
14879  maxGpuBytesToMove, maxGpuAllocationsToMove,
14880  commandBuffer);
14881  if(pBlockVectorCtx->res != VK_SUCCESS)
14882  {
14883  res = pBlockVectorCtx->res;
14884  }
14885  }
14886  }
14887 
14888  // Process custom pools.
14889  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14890  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14891  ++customCtxIndex)
14892  {
14893  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14894  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14895  pBlockVectorCtx->GetBlockVector()->Defragment(
14896  pBlockVectorCtx,
14897  pStats, flags,
14898  maxCpuBytesToMove, maxCpuAllocationsToMove,
14899  maxGpuBytesToMove, maxGpuAllocationsToMove,
14900  commandBuffer);
14901  if(pBlockVectorCtx->res != VK_SUCCESS)
14902  {
14903  res = pBlockVectorCtx->res;
14904  }
14905  }
14906 
14907  return res;
14908 }
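// Editorial example: a minimal sketch of driving this context through the public
// API, assuming an existing `allocator` plus hypothetical arrays `allocs` and
// `allocsChanged` (error handling omitted):
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.pAllocationsChanged = allocsChanged;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
// For the GPU path, also fill defragInfo.commandBuffer and submit it here.
vmaDefragmentationEnd(allocator, defragCtx);
// Buffers/images bound to moved allocations must now be recreated and rebound.
*/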
14909 
14910 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14911 {
14912  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14913  uint32_t movesLeft = pInfo->moveCount;
14914 
14915  // Process default pools.
14916  for(uint32_t memTypeIndex = 0;
14917  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14918  ++memTypeIndex)
14919  {
14920  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14921  if(pBlockVectorCtx)
14922  {
14923  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14924 
14925  if(!pBlockVectorCtx->hasDefragmentationPlan)
14926  {
14927  pBlockVectorCtx->GetBlockVector()->Defragment(
14928  pBlockVectorCtx,
14929  m_pStats, m_Flags,
14930  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14931  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14932  VK_NULL_HANDLE);
14933 
14934  if(pBlockVectorCtx->res < VK_SUCCESS)
14935  continue;
14936 
14937  pBlockVectorCtx->hasDefragmentationPlan = true;
14938  }
14939 
14940  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14941  pBlockVectorCtx,
14942  pCurrentMove, movesLeft);
14943 
14944  movesLeft -= processed;
14945  pCurrentMove += processed;
14946  }
14947  }
14948 
14949  // Process custom pools.
14950  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14951  customCtxIndex < customCtxCount;
14952  ++customCtxIndex)
14953  {
14954  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14955  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14956 
14957  if(!pBlockVectorCtx->hasDefragmentationPlan)
14958  {
14959  pBlockVectorCtx->GetBlockVector()->Defragment(
14960  pBlockVectorCtx,
14961  m_pStats, m_Flags,
14962  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14963  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14964  VK_NULL_HANDLE);
14965 
14966  if(pBlockVectorCtx->res < VK_SUCCESS)
14967  continue;
14968 
14969  pBlockVectorCtx->hasDefragmentationPlan = true;
14970  }
14971 
14972  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14973  pBlockVectorCtx,
14974  pCurrentMove, movesLeft);
14975 
14976  movesLeft -= processed;
14977  pCurrentMove += processed;
14978  }
14979 
14980  pInfo->moveCount = pInfo->moveCount - movesLeft;
14981 
14982  return VK_SUCCESS;
14983 }
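// Editorial example: a minimal sketch of the incremental pass loop served by
// DefragmentPassBegin()/DefragmentPassEnd(), assuming `defragCtx` was begun with
// VMA_DEFRAGMENTATION_FLAG_INCREMENTAL (hypothetical variable names):
/*
for(;;)
{
    VmaDefragmentationPassMoveInfo moves[64];
    VmaDefragmentationPassInfo passInfo = {};
    passInfo.moveCount = 64; // in: capacity of pMoves; out: number of moves returned
    passInfo.pMoves = moves;
    vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);

    // Copy each moves[i].allocation to moves[i].memory at moves[i].offset,
    // then recreate/rebind the resources that used the old placement.

    if(vmaEndDefragmentationPass(allocator, defragCtx) == VK_SUCCESS)
        break; // VK_NOT_READY means more passes are needed.
}
*/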
14984 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14985 {
14986  VkResult res = VK_SUCCESS;
14987 
14988  // Process default pools.
14989  for(uint32_t memTypeIndex = 0;
14990  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14991  ++memTypeIndex)
14992  {
14993  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14994  if(pBlockVectorCtx)
14995  {
14996  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14997 
14998  if(!pBlockVectorCtx->hasDefragmentationPlan)
14999  {
15000  res = VK_NOT_READY;
15001  continue;
15002  }
15003 
15004  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15005  pBlockVectorCtx, m_pStats);
15006 
15007  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15008  res = VK_NOT_READY;
15009  }
15010  }
15011 
15012  // Process custom pools.
15013  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
15014  customCtxIndex < customCtxCount;
15015  ++customCtxIndex)
15016  {
15017  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
15018  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
15019 
15020  if(!pBlockVectorCtx->hasDefragmentationPlan)
15021  {
15022  res = VK_NOT_READY;
15023  continue;
15024  }
15025 
15026  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
15027  pBlockVectorCtx, m_pStats);
15028 
15029  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15030  res = VK_NOT_READY;
15031  }
15032 
15033  return res;
15034 }
15035 
15036 ////////////////////////////////////////////////////////////////////////////////
15037 // VmaRecorder
15038 
15039 #if VMA_RECORDING_ENABLED
15040 
15041 VmaRecorder::VmaRecorder() :
15042  m_UseMutex(true),
15043  m_Flags(0),
15044  m_File(VMA_NULL),
15045  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
15046 {
15047 }
15048 
15049 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
15050 {
15051  m_UseMutex = useMutex;
15052  m_Flags = settings.flags;
15053 
15054 #if defined(_WIN32)
15055  // Open file for writing.
15056  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
15057 
15058  if(err != 0)
15059  {
15060  return VK_ERROR_INITIALIZATION_FAILED;
15061  }
15062 #else
15063  // Open file for writing.
15064  m_File = fopen(settings.pFilePath, "wb");
15065 
15066  if(m_File == 0)
15067  {
15068  return VK_ERROR_INITIALIZATION_FAILED;
15069  }
15070 #endif
15071 
15072  // Write header.
15073  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
15074  fprintf(m_File, "%s\n", "1,8");
15075 
15076  return VK_SUCCESS;
15077 }
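// Editorial note: the recording is a plain CSV file. With the header written
// above, the first lines of a capture look like this (thread id, timestamp, and
// frame index are illustrative):
//
//   Vulkan Memory Allocator,Calls recording
//   1,8
//   12552,0.002,0,vmaCreateAllocator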
15078 
15079 VmaRecorder::~VmaRecorder()
15080 {
15081  if(m_File != VMA_NULL)
15082  {
15083  fclose(m_File);
15084  }
15085 }
15086 
15087 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
15088 {
15089  CallParams callParams;
15090  GetBasicParams(callParams);
15091 
15092  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15093  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
15094  Flush();
15095 }
15096 
15097 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
15098 {
15099  CallParams callParams;
15100  GetBasicParams(callParams);
15101 
15102  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15103  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
15104  Flush();
15105 }
15106 
15107 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
15108 {
15109  CallParams callParams;
15110  GetBasicParams(callParams);
15111 
15112  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15113  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
15114  createInfo.memoryTypeIndex,
15115  createInfo.flags,
15116  createInfo.blockSize,
15117  (uint64_t)createInfo.minBlockCount,
15118  (uint64_t)createInfo.maxBlockCount,
15119  createInfo.frameInUseCount,
15120  pool);
15121  Flush();
15122 }
15123 
15124 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
15125 {
15126  CallParams callParams;
15127  GetBasicParams(callParams);
15128 
15129  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15130  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
15131  pool);
15132  Flush();
15133 }
15134 
15135 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
15136  const VkMemoryRequirements& vkMemReq,
15137  const VmaAllocationCreateInfo& createInfo,
15138  VmaAllocation allocation)
15139 {
15140  CallParams callParams;
15141  GetBasicParams(callParams);
15142 
15143  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15144  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15145  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15146  vkMemReq.size,
15147  vkMemReq.alignment,
15148  vkMemReq.memoryTypeBits,
15149  createInfo.flags,
15150  createInfo.usage,
15151  createInfo.requiredFlags,
15152  createInfo.preferredFlags,
15153  createInfo.memoryTypeBits,
15154  createInfo.pool,
15155  allocation,
15156  userDataStr.GetString());
15157  Flush();
15158 }
15159 
15160 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
15161  const VkMemoryRequirements& vkMemReq,
15162  const VmaAllocationCreateInfo& createInfo,
15163  uint64_t allocationCount,
15164  const VmaAllocation* pAllocations)
15165 {
15166  CallParams callParams;
15167  GetBasicParams(callParams);
15168 
15169  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15170  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15171  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
15172  vkMemReq.size,
15173  vkMemReq.alignment,
15174  vkMemReq.memoryTypeBits,
15175  createInfo.flags,
15176  createInfo.usage,
15177  createInfo.requiredFlags,
15178  createInfo.preferredFlags,
15179  createInfo.memoryTypeBits,
15180  createInfo.pool);
15181  PrintPointerList(allocationCount, pAllocations);
15182  fprintf(m_File, ",%s\n", userDataStr.GetString());
15183  Flush();
15184 }
15185 
15186 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
15187  const VkMemoryRequirements& vkMemReq,
15188  bool requiresDedicatedAllocation,
15189  bool prefersDedicatedAllocation,
15190  const VmaAllocationCreateInfo& createInfo,
15191  VmaAllocation allocation)
15192 {
15193  CallParams callParams;
15194  GetBasicParams(callParams);
15195 
15196  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15197  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15198  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15199  vkMemReq.size,
15200  vkMemReq.alignment,
15201  vkMemReq.memoryTypeBits,
15202  requiresDedicatedAllocation ? 1 : 0,
15203  prefersDedicatedAllocation ? 1 : 0,
15204  createInfo.flags,
15205  createInfo.usage,
15206  createInfo.requiredFlags,
15207  createInfo.preferredFlags,
15208  createInfo.memoryTypeBits,
15209  createInfo.pool,
15210  allocation,
15211  userDataStr.GetString());
15212  Flush();
15213 }
15214 
15215 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15216  const VkMemoryRequirements& vkMemReq,
15217  bool requiresDedicatedAllocation,
15218  bool prefersDedicatedAllocation,
15219  const VmaAllocationCreateInfo& createInfo,
15220  VmaAllocation allocation)
15221 {
15222  CallParams callParams;
15223  GetBasicParams(callParams);
15224 
15225  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15226  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15227  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15228  vkMemReq.size,
15229  vkMemReq.alignment,
15230  vkMemReq.memoryTypeBits,
15231  requiresDedicatedAllocation ? 1 : 0,
15232  prefersDedicatedAllocation ? 1 : 0,
15233  createInfo.flags,
15234  createInfo.usage,
15235  createInfo.requiredFlags,
15236  createInfo.preferredFlags,
15237  createInfo.memoryTypeBits,
15238  createInfo.pool,
15239  allocation,
15240  userDataStr.GetString());
15241  Flush();
15242 }
15243 
15244 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15245  VmaAllocation allocation)
15246 {
15247  CallParams callParams;
15248  GetBasicParams(callParams);
15249 
15250  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15251  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15252  allocation);
15253  Flush();
15254 }
15255 
15256 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15257  uint64_t allocationCount,
15258  const VmaAllocation* pAllocations)
15259 {
15260  CallParams callParams;
15261  GetBasicParams(callParams);
15262 
15263  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15264  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15265  PrintPointerList(allocationCount, pAllocations);
15266  fprintf(m_File, "\n");
15267  Flush();
15268 }
15269 
15270 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15271  VmaAllocation allocation,
15272  const void* pUserData)
15273 {
15274  CallParams callParams;
15275  GetBasicParams(callParams);
15276 
15277  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15278  UserDataString userDataStr(
15279  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15280  pUserData);
15281  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15282  allocation,
15283  userDataStr.GetString());
15284  Flush();
15285 }
15286 
15287 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15288  VmaAllocation allocation)
15289 {
15290  CallParams callParams;
15291  GetBasicParams(callParams);
15292 
15293  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15294  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15295  allocation);
15296  Flush();
15297 }
15298 
15299 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15300  VmaAllocation allocation)
15301 {
15302  CallParams callParams;
15303  GetBasicParams(callParams);
15304 
15305  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15306  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15307  allocation);
15308  Flush();
15309 }
15310 
15311 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15312  VmaAllocation allocation)
15313 {
15314  CallParams callParams;
15315  GetBasicParams(callParams);
15316 
15317  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15318  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15319  allocation);
15320  Flush();
15321 }
15322 
15323 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15324  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15325 {
15326  CallParams callParams;
15327  GetBasicParams(callParams);
15328 
15329  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15330  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15331  allocation,
15332  offset,
15333  size);
15334  Flush();
15335 }
15336 
15337 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15338  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15339 {
15340  CallParams callParams;
15341  GetBasicParams(callParams);
15342 
15343  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15344  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15345  allocation,
15346  offset,
15347  size);
15348  Flush();
15349 }
15350 
15351 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15352  const VkBufferCreateInfo& bufCreateInfo,
15353  const VmaAllocationCreateInfo& allocCreateInfo,
15354  VmaAllocation allocation)
15355 {
15356  CallParams callParams;
15357  GetBasicParams(callParams);
15358 
15359  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15360  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15361  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15362  bufCreateInfo.flags,
15363  bufCreateInfo.size,
15364  bufCreateInfo.usage,
15365  bufCreateInfo.sharingMode,
15366  allocCreateInfo.flags,
15367  allocCreateInfo.usage,
15368  allocCreateInfo.requiredFlags,
15369  allocCreateInfo.preferredFlags,
15370  allocCreateInfo.memoryTypeBits,
15371  allocCreateInfo.pool,
15372  allocation,
15373  userDataStr.GetString());
15374  Flush();
15375 }
15376 
15377 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15378  const VkImageCreateInfo& imageCreateInfo,
15379  const VmaAllocationCreateInfo& allocCreateInfo,
15380  VmaAllocation allocation)
15381 {
15382  CallParams callParams;
15383  GetBasicParams(callParams);
15384 
15385  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15386  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15387  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15388  imageCreateInfo.flags,
15389  imageCreateInfo.imageType,
15390  imageCreateInfo.format,
15391  imageCreateInfo.extent.width,
15392  imageCreateInfo.extent.height,
15393  imageCreateInfo.extent.depth,
15394  imageCreateInfo.mipLevels,
15395  imageCreateInfo.arrayLayers,
15396  imageCreateInfo.samples,
15397  imageCreateInfo.tiling,
15398  imageCreateInfo.usage,
15399  imageCreateInfo.sharingMode,
15400  imageCreateInfo.initialLayout,
15401  allocCreateInfo.flags,
15402  allocCreateInfo.usage,
15403  allocCreateInfo.requiredFlags,
15404  allocCreateInfo.preferredFlags,
15405  allocCreateInfo.memoryTypeBits,
15406  allocCreateInfo.pool,
15407  allocation,
15408  userDataStr.GetString());
15409  Flush();
15410 }
15411 
15412 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15413  VmaAllocation allocation)
15414 {
15415  CallParams callParams;
15416  GetBasicParams(callParams);
15417 
15418  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15419  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15420  allocation);
15421  Flush();
15422 }
15423 
15424 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15425  VmaAllocation allocation)
15426 {
15427  CallParams callParams;
15428  GetBasicParams(callParams);
15429 
15430  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15431  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15432  allocation);
15433  Flush();
15434 }
15435 
15436 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15437  VmaAllocation allocation)
15438 {
15439  CallParams callParams;
15440  GetBasicParams(callParams);
15441 
15442  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15443  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15444  allocation);
15445  Flush();
15446 }
15447 
15448 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15449  VmaAllocation allocation)
15450 {
15451  CallParams callParams;
15452  GetBasicParams(callParams);
15453 
15454  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15455  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15456  allocation);
15457  Flush();
15458 }
15459 
15460 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15461  VmaPool pool)
15462 {
15463  CallParams callParams;
15464  GetBasicParams(callParams);
15465 
15466  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15467  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15468  pool);
15469  Flush();
15470 }
15471 
15472 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15473  const VmaDefragmentationInfo2& info,
15474  VmaDefragmentationContext ctx)
15475 {
15476  CallParams callParams;
15477  GetBasicParams(callParams);
15478 
15479  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15480  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15481  info.flags);
15482  PrintPointerList(info.allocationCount, info.pAllocations);
15483  fprintf(m_File, ",");
15484  PrintPointerList(info.poolCount, info.pPools);
15485  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15486  info.maxCpuBytesToMove,
15487  info.maxCpuAllocationsToMove,
15488  info.maxGpuBytesToMove,
15489  info.maxGpuAllocationsToMove,
15490  info.commandBuffer,
15491  ctx);
15492  Flush();
15493 }
15494 
15495 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15496  VmaDefragmentationContext ctx)
15497 {
15498  CallParams callParams;
15499  GetBasicParams(callParams);
15500 
15501  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15502  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15503  ctx);
15504  Flush();
15505 }
15506 
15507 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15508  VmaPool pool,
15509  const char* name)
15510 {
15511  CallParams callParams;
15512  GetBasicParams(callParams);
15513 
15514  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15515  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15516  pool, name != VMA_NULL ? name : "");
15517  Flush();
15518 }
15519 
15520 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15521 {
15522  if(pUserData != VMA_NULL)
15523  {
15524  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15525  {
15526  m_Str = (const char*)pUserData;
15527  }
15528  else
15529  {
15530  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, convert the string's memory address to a string and store it.
15531  snprintf(m_PtrStr, 17, "%p", pUserData);
15532  m_Str = m_PtrStr;
15533  }
15534  }
15535  else
15536  {
15537  m_Str = "";
15538  }
15539 }
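// Note: the userData column emitted by the Record* functions above is thus
// either the user's string copied verbatim (when
// VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is set) or the pointer
// value rendered as text, e.g. "0x7f3a2c001230". Either way it occupies a
// single CSV field, so a replay tool can treat it as an opaque token.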
15540 
15541 void VmaRecorder::WriteConfiguration(
15542  const VkPhysicalDeviceProperties& devProps,
15543  const VkPhysicalDeviceMemoryProperties& memProps,
15544  uint32_t vulkanApiVersion,
15545  bool dedicatedAllocationExtensionEnabled,
15546  bool bindMemory2ExtensionEnabled,
15547  bool memoryBudgetExtensionEnabled,
15548  bool deviceCoherentMemoryExtensionEnabled)
15549 {
15550  fprintf(m_File, "Config,Begin\n");
15551 
15552  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15553 
15554  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15555  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15556  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15557  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15558  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15559  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15560 
15561  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15562  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15563  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15564 
15565  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15566  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15567  {
15568  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15569  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15570  }
15571  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15572  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15573  {
15574  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15575  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15576  }
15577 
15578  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15579  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15580  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15581  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15582 
15583  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15584  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
15585  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15586  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15587  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15588  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15589  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15590  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15591  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15592 
15593  fprintf(m_File, "Config,End\n");
15594 }
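// Note: the "Config" section above captures enough of the device properties
// and of the VMA_* compile-time macros for a replay tool to detect, before
// re-executing any allocation, that a recording is being replayed on an
// incompatible device or build.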
15595 
15596 void VmaRecorder::GetBasicParams(CallParams& outParams)
15597 {
15598  #if defined(_WIN32)
15599  outParams.threadId = GetCurrentThreadId();
15600  #else
15601  // Use C++11 features to get thread id and convert it to uint32_t.
15602  // There is room for optimization since sstream is quite slow.
15603  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
15604  std::thread::id thread_id = std::this_thread::get_id();
15605  std::stringstream thread_id_to_string_converter;
15606  thread_id_to_string_converter << thread_id;
15607  std::string thread_id_as_string = thread_id_to_string_converter.str();
15608  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
15609  #endif
15610 
15611  auto current_time = std::chrono::high_resolution_clock::now();
15612 
15613  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
15614 }
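// Note: the stringstream round-trip above assumes that the textual form of
// std::thread::id is a decimal number, which common standard libraries
// produce but the C++ standard does not guarantee. A sketch of a portable
// (though differently numbered) alternative:
//
//   outParams.threadId = static_cast<uint32_t>(
//       std::hash<std::thread::id>()(std::this_thread::get_id()));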
15615 
15616 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15617 {
15618  if(count)
15619  {
15620  fprintf(m_File, "%p", pItems[0]);
15621  for(uint64_t i = 1; i < count; ++i)
15622  {
15623  fprintf(m_File, " %p", pItems[i]);
15624  }
15625  }
15626 }
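// Note: the pointers are separated by spaces rather than commas so that the
// whole list occupies a single field of the comma-separated recording line.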
15627 
15628 void VmaRecorder::Flush()
15629 {
15630  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15631  {
15632  fflush(m_File);
15633  }
15634 }
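// Usage sketch (assuming a build with VMA_RECORDING_ENABLED defined to 1):
// requesting a flush after every call keeps the file usable even if the
// application crashes mid-recording, at some performance cost. The file
// path below is a hypothetical example.
//
//   VmaRecordSettings recordSettings = {};
//   recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
//   recordSettings.pFilePath = "vma_recording.csv";
//   allocatorCreateInfo.pRecordSettings = &recordSettings;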
15635 
15636 #endif // #if VMA_RECORDING_ENABLED
15637 
15638 ////////////////////////////////////////////////////////////////////////////////
15639 // VmaAllocationObjectAllocator
15640 
15641 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
15642  m_Allocator(pAllocationCallbacks, 1024)
15643 {
15644 }
15645 
15646 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
15647 {
15648  VmaMutexLock mutexLock(m_Mutex);
15649  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
15650 }
15651 
15652 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15653 {
15654  VmaMutexLock mutexLock(m_Mutex);
15655  m_Allocator.Free(hAlloc);
15656 }
15657 
15658 ////////////////////////////////////////////////////////////////////////////////
15659 // VmaAllocator_T
15660 
15661 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15662  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15663  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15664  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15665  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15666  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15667  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15668  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15669  m_hDevice(pCreateInfo->device),
15670  m_hInstance(pCreateInfo->instance),
15671  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15672  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15673  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15674  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15675  m_HeapSizeLimitMask(0),
15676  m_PreferredLargeHeapBlockSize(0),
15677  m_PhysicalDevice(pCreateInfo->physicalDevice),
15678  m_CurrentFrameIndex(0),
15679  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15680  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15681  m_NextPoolId(0),
15682  m_GlobalMemoryTypeBits(UINT32_MAX)
15683 #if VMA_RECORDING_ENABLED
15684  ,m_pRecorder(VMA_NULL)
15685 #endif
15686 {
15687  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15688  {
15689  m_UseKhrDedicatedAllocation = false;
15690  m_UseKhrBindMemory2 = false;
15691  }
15692 
15693  if(VMA_DEBUG_DETECT_CORRUPTION)
15694  {
15695  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15696  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15697  }
15698 
15699  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
15700 
15701  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15702  {
15703 #if !(VMA_DEDICATED_ALLOCATION)
15704  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15705  {
15706  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15707  }
15708 #endif
15709 #if !(VMA_BIND_MEMORY2)
15710  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15711  {
15712  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15713  }
15714 #endif
15715  }
15716 #if !(VMA_MEMORY_BUDGET)
15717  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15718  {
15719  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15720  }
15721 #endif
15722 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15723  if(m_UseKhrBufferDeviceAddress)
15724  {
15725  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15726  }
15727 #endif
15728 #if VMA_VULKAN_VERSION < 1002000
15729  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15730  {
15731  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15732  }
15733 #endif
15734 #if VMA_VULKAN_VERSION < 1001000
15735  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15736  {
15737  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15738  }
15739 #endif
15740 
15741  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
15742  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15743  memset(&m_MemProps, 0, sizeof(m_MemProps));
15744 
15745  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15746  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15747  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15748 
15749  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15750  {
15751  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
15752  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15753  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15754  }
15755 
15756  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15757 
15758  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15759  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15760 
15761  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15762  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15763  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15764  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15765 
15766  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15767  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15768 
15769  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15770 
15771  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15772  {
15773  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15774  {
15775  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15776  if(limit != VK_WHOLE_SIZE)
15777  {
15778  m_HeapSizeLimitMask |= 1u << heapIndex;
15779  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15780  {
15781  m_MemProps.memoryHeaps[heapIndex].size = limit;
15782  }
15783  }
15784  }
15785  }
15786 
15787  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15788  {
15789  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15790 
15791  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15792  this,
15793  VK_NULL_HANDLE, // hParentPool
15794  memTypeIndex,
15795  preferredBlockSize,
15796  0,
15797  SIZE_MAX,
15798  GetBufferImageGranularity(),
15799  pCreateInfo->frameInUseCount,
15800  false, // explicitBlockSize
15801  false); // linearAlgorithm
15802  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15803  // because minBlockCount is 0.
15804  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15805 
15806  }
15807 }
15808 
15809 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15810 {
15811  VkResult res = VK_SUCCESS;
15812 
15813  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15814  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15815  {
15816 #if VMA_RECORDING_ENABLED
15817  m_pRecorder = vma_new(this, VmaRecorder)();
15818  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15819  if(res != VK_SUCCESS)
15820  {
15821  return res;
15822  }
15823  m_pRecorder->WriteConfiguration(
15824  m_PhysicalDeviceProperties,
15825  m_MemProps,
15826  m_VulkanApiVersion,
15827  m_UseKhrDedicatedAllocation,
15828  m_UseKhrBindMemory2,
15829  m_UseExtMemoryBudget,
15830  m_UseAmdDeviceCoherentMemory);
15831  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15832 #else
15833  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15834  return VK_ERROR_FEATURE_NOT_PRESENT;
15835 #endif
15836  }
15837 
15838 #if VMA_MEMORY_BUDGET
15839  if(m_UseExtMemoryBudget)
15840  {
15841  UpdateVulkanBudget();
15842  }
15843 #endif // #if VMA_MEMORY_BUDGET
15844 
15845  return res;
15846 }
15847 
15848 VmaAllocator_T::~VmaAllocator_T()
15849 {
15850 #if VMA_RECORDING_ENABLED
15851  if(m_pRecorder != VMA_NULL)
15852  {
15853  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15854  vma_delete(this, m_pRecorder);
15855  }
15856 #endif
15857 
15858  VMA_ASSERT(m_Pools.empty());
15859 
15860  for(size_t i = GetMemoryTypeCount(); i--; )
15861  {
15862  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15863  {
15864  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15865  }
15866 
15867  vma_delete(this, m_pDedicatedAllocations[i]);
15868  vma_delete(this, m_pBlockVectors[i]);
15869  }
15870 }
15871 
15872 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15873 {
15874 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15875  ImportVulkanFunctions_Static();
15876 #endif
15877 
15878  if(pVulkanFunctions != VMA_NULL)
15879  {
15880  ImportVulkanFunctions_Custom(pVulkanFunctions);
15881  }
15882 
15883 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15884  ImportVulkanFunctions_Dynamic();
15885 #endif
15886 
15887  ValidateVulkanFunctions();
15888 }
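// Note: the import order above gives user-supplied pointers priority where
// it matters: statically linked functions are taken first, any non-null
// entries in pVulkanFunctions then overwrite them, and the dynamic fetch
// only fills members that are still VMA_NULL. ValidateVulkanFunctions()
// finally asserts that every pointer required by the enabled features is set.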
15889 
15890 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15891 
15892 void VmaAllocator_T::ImportVulkanFunctions_Static()
15893 {
15894  // Vulkan 1.0
15895  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
15896  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
15897  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
15898  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
15899  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
15900  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
15901  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
15902  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
15903  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
15904  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
15905  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
15906  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
15907  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
15908  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
15909  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
15910  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
15911  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
15912 
15913  // Vulkan 1.1
15914 #if VMA_VULKAN_VERSION >= 1001000
15915  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15916  {
15917  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
15918  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
15919  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
15920  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
15921  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
15922  }
15923 #endif
15924 }
15925 
15926 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15927 
15928 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
15929 {
15930  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
15931 
15932 #define VMA_COPY_IF_NOT_NULL(funcName) \
15933  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15934 
15935  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15936  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
15937  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
15938  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
15939  VMA_COPY_IF_NOT_NULL(vkMapMemory);
15940  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
15941  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
15942  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
15943  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
15944  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
15945  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
15946  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
15947  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
15948  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
15949  VMA_COPY_IF_NOT_NULL(vkCreateImage);
15950  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
15951  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
15952 
15953 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15954  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
15955  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
15956 #endif
15957 
15958 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15959  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
15960  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
15961 #endif
15962 
15963 #if VMA_MEMORY_BUDGET
15964  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
15965 #endif
15966 
15967 #undef VMA_COPY_IF_NOT_NULL
15968 }
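// Usage sketch: because only non-null members are copied, pVulkanFunctions
// may be filled partially, e.g. to interpose a single function (assuming
// VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 so the remaining entries are fetched
// automatically; myInstrumentedAllocateMemory is a hypothetical hook).
//
//   VmaVulkanFunctions vulkanFunctions = {};
//   vulkanFunctions.vkAllocateMemory = &myInstrumentedAllocateMemory;
//   allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;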
15969 
15970 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15971 
15972 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
15973 {
15974 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
15975  if(m_VulkanFunctions.memberName == VMA_NULL) \
15976  m_VulkanFunctions.memberName = \
15977  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
15978 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
15979  if(m_VulkanFunctions.memberName == VMA_NULL) \
15980  m_VulkanFunctions.memberName = \
15981  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
15982 
15983  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
15984  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
15985  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
15986  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
15987  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
15988  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
15989  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
15990  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
15991  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
15992  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
15993  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
15994  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
15995  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
15996  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
15997  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
15998  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
15999  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
16000 
16001 #if VMA_VULKAN_VERSION >= 1001000
16002  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16003  {
16004  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
16005  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
16006  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
16007  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
16008  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
16009  }
16010 #endif
16011 
16012 #if VMA_DEDICATED_ALLOCATION
16013  if(m_UseKhrDedicatedAllocation)
16014  {
16015  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
16016  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
16017  }
16018 #endif
16019 
16020 #if VMA_BIND_MEMORY2
16021  if(m_UseKhrBindMemory2)
16022  {
16023  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
16024  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
16025  }
16026 #endif // #if VMA_BIND_MEMORY2
16027 
16028 #if VMA_MEMORY_BUDGET
16029  if(m_UseExtMemoryBudget)
16030  {
16031  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
16032  }
16033 #endif // #if VMA_MEMORY_BUDGET
16034 
16035 #undef VMA_FETCH_DEVICE_FUNC
16036 #undef VMA_FETCH_INSTANCE_FUNC
16037 }
16038 
16039 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16040 
16041 void VmaAllocator_T::ValidateVulkanFunctions()
16042 {
16043  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
16044  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
16045  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
16046  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
16047  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
16048  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
16049  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
16050  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
16051  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
16052  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
16053  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
16054  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
16055  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
16056  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
16057  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
16058  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
16059  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
16060 
16061 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16062  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
16063  {
16064  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
16065  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
16066  }
16067 #endif
16068 
16069 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16070  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
16071  {
16072  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
16073  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
16074  }
16075 #endif
16076 
16077 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
16078  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16079  {
16080  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
16081  }
16082 #endif
16083 }
16084 
16085 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
16086 {
16087  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16088  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16089  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
16090  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
16091 }
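// Worked example, using the default VMA_SMALL_HEAP_MAX_SIZE (1 GiB) and
// VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256 MiB): a 256 MiB heap counts as
// small and gets 256 / 8 = 32 MiB blocks, while an 8 GiB heap gets the
// fixed 256 MiB block size. The result is then aligned up to 32 bytes.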
16092 
16093 VkResult VmaAllocator_T::AllocateMemoryOfType(
16094  VkDeviceSize size,
16095  VkDeviceSize alignment,
16096  bool dedicatedAllocation,
16097  VkBuffer dedicatedBuffer,
16098  VkBufferUsageFlags dedicatedBufferUsage,
16099  VkImage dedicatedImage,
16100  const VmaAllocationCreateInfo& createInfo,
16101  uint32_t memTypeIndex,
16102  VmaSuballocationType suballocType,
16103  size_t allocationCount,
16104  VmaAllocation* pAllocations)
16105 {
16106  VMA_ASSERT(pAllocations != VMA_NULL);
16107  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
16108 
16109  VmaAllocationCreateInfo finalCreateInfo = createInfo;
16110 
16111  // If memory type is not HOST_VISIBLE, disable MAPPED.
16112  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16113  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16114  {
16115  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16116  }
16117  // If memory is lazily allocated, it should be always dedicated.
16118  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
16119  {
16120  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16121  }
16122 
16123  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
16124  VMA_ASSERT(blockVector);
16125 
16126  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
16127  bool preferDedicatedMemory =
16128  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
16129  dedicatedAllocation ||
16130  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
16131  size > preferredBlockSize / 2;
16132 
16133  if(preferDedicatedMemory &&
16134  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
16135  finalCreateInfo.pool == VK_NULL_HANDLE)
16136  {
16137  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16138  }
16139 
16140  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
16141  {
16142  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16143  {
16144  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16145  }
16146  else
16147  {
16148  return AllocateDedicatedMemory(
16149  size,
16150  suballocType,
16151  memTypeIndex,
16152  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16153  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16154  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16155  finalCreateInfo.pUserData,
16156  dedicatedBuffer,
16157  dedicatedBufferUsage,
16158  dedicatedImage,
16159  allocationCount,
16160  pAllocations);
16161  }
16162  }
16163  else
16164  {
16165  VkResult res = blockVector->Allocate(
16166  m_CurrentFrameIndex.load(),
16167  size,
16168  alignment,
16169  finalCreateInfo,
16170  suballocType,
16171  allocationCount,
16172  pAllocations);
16173  if(res == VK_SUCCESS)
16174  {
16175  return res;
16176  }
16177 
16178  // Block allocation failed. Try dedicated memory.
16179  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16180  {
16181  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16182  }
16183  else
16184  {
16185  res = AllocateDedicatedMemory(
16186  size,
16187  suballocType,
16188  memTypeIndex,
16189  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16190  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16191  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16192  finalCreateInfo.pUserData,
16193  dedicatedBuffer,
16194  dedicatedBufferUsage,
16195  dedicatedImage,
16196  allocationCount,
16197  pAllocations);
16198  if(res == VK_SUCCESS)
16199  {
16200  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
16201  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
16202  return VK_SUCCESS;
16203  }
16204  else
16205  {
16206  // Everything failed: Return error code.
16207  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16208  return res;
16209  }
16210  }
16211  }
16212 }
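// Note: the decision flow above is: force dedicated memory when the caller,
// the heuristic (size greater than half of the preferred block size), or
// VMA_DEBUG_ALWAYS_DEDICATED_MEMORY demands it; otherwise try to
// suballocate from the block vector first and fall back to a dedicated
// vkAllocateMemory call only if that fails and
// VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not set.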
16213 
16214 VkResult VmaAllocator_T::AllocateDedicatedMemory(
16215  VkDeviceSize size,
16216  VmaSuballocationType suballocType,
16217  uint32_t memTypeIndex,
16218  bool withinBudget,
16219  bool map,
16220  bool isUserDataString,
16221  void* pUserData,
16222  VkBuffer dedicatedBuffer,
16223  VkBufferUsageFlags dedicatedBufferUsage,
16224  VkImage dedicatedImage,
16225  size_t allocationCount,
16226  VmaAllocation* pAllocations)
16227 {
16228  VMA_ASSERT(allocationCount > 0 && pAllocations);
16229 
16230  if(withinBudget)
16231  {
16232  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16233  VmaBudget heapBudget = {};
16234  GetBudget(&heapBudget, heapIndex, 1);
16235  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16236  {
16237  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16238  }
16239  }
16240 
16241  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16242  allocInfo.memoryTypeIndex = memTypeIndex;
16243  allocInfo.allocationSize = size;
16244 
16245 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16246  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16247  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16248  {
16249  if(dedicatedBuffer != VK_NULL_HANDLE)
16250  {
16251  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16252  dedicatedAllocInfo.buffer = dedicatedBuffer;
16253  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16254  }
16255  else if(dedicatedImage != VK_NULL_HANDLE)
16256  {
16257  dedicatedAllocInfo.image = dedicatedImage;
16258  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16259  }
16260  }
16261 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16262 
16263 #if VMA_BUFFER_DEVICE_ADDRESS
16264  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16265  if(m_UseKhrBufferDeviceAddress)
16266  {
16267  bool canContainBufferWithDeviceAddress = true;
16268  if(dedicatedBuffer != VK_NULL_HANDLE)
16269  {
16270  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16271  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16272  }
16273  else if(dedicatedImage != VK_NULL_HANDLE)
16274  {
16275  canContainBufferWithDeviceAddress = false;
16276  }
16277  if(canContainBufferWithDeviceAddress)
16278  {
16279  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16280  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16281  }
16282  }
16283 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16284 
16285  size_t allocIndex;
16286  VkResult res = VK_SUCCESS;
16287  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16288  {
16289  res = AllocateDedicatedMemoryPage(
16290  size,
16291  suballocType,
16292  memTypeIndex,
16293  allocInfo,
16294  map,
16295  isUserDataString,
16296  pUserData,
16297  pAllocations + allocIndex);
16298  if(res != VK_SUCCESS)
16299  {
16300  break;
16301  }
16302  }
16303 
16304  if(res == VK_SUCCESS)
16305  {
16306  // Register them in m_pDedicatedAllocations.
16307  {
16308  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16309  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16310  VMA_ASSERT(pDedicatedAllocations);
16311  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16312  {
16313  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
16314  }
16315  }
16316 
16317  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16318  }
16319  else
16320  {
16321  // Free all already created allocations.
16322  while(allocIndex--)
16323  {
16324  VmaAllocation currAlloc = pAllocations[allocIndex];
16325  VkDeviceMemory hMemory = currAlloc->GetMemory();
16326 
16327  /*
16328  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16329  before vkFreeMemory.
16330 
16331  if(currAlloc->GetMappedData() != VMA_NULL)
16332  {
16333  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16334  }
16335  */
16336 
16337  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16338  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16339  currAlloc->SetUserData(this, VMA_NULL);
16340  m_AllocationObjectAllocator.Free(currAlloc);
16341  }
16342 
16343  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16344  }
16345 
16346  return res;
16347 }
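// Note: allocation of multiple pages is all-or-nothing: if any page fails,
// every page created so far is freed (Vulkan allows freeing mapped memory
// without unmapping it first), removed from the budget, and the whole
// pAllocations array is zeroed before the error code is returned.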
16348 
16349 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16350  VkDeviceSize size,
16351  VmaSuballocationType suballocType,
16352  uint32_t memTypeIndex,
16353  const VkMemoryAllocateInfo& allocInfo,
16354  bool map,
16355  bool isUserDataString,
16356  void* pUserData,
16357  VmaAllocation* pAllocation)
16358 {
16359  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16360  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16361  if(res < 0)
16362  {
16363  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16364  return res;
16365  }
16366 
16367  void* pMappedData = VMA_NULL;
16368  if(map)
16369  {
16370  res = (*m_VulkanFunctions.vkMapMemory)(
16371  m_hDevice,
16372  hMemory,
16373  0,
16374  VK_WHOLE_SIZE,
16375  0,
16376  &pMappedData);
16377  if(res < 0)
16378  {
16379  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16380  FreeVulkanMemory(memTypeIndex, size, hMemory);
16381  return res;
16382  }
16383  }
16384 
16385  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16386  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16387  (*pAllocation)->SetUserData(this, pUserData);
16388  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16389  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16390  {
16391  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16392  }
16393 
16394  return VK_SUCCESS;
16395 }
16396 
16397 void VmaAllocator_T::GetBufferMemoryRequirements(
16398  VkBuffer hBuffer,
16399  VkMemoryRequirements& memReq,
16400  bool& requiresDedicatedAllocation,
16401  bool& prefersDedicatedAllocation) const
16402 {
16403 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16404  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16405  {
16406  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16407  memReqInfo.buffer = hBuffer;
16408 
16409  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16410 
16411  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16412  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16413 
16414  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16415 
16416  memReq = memReq2.memoryRequirements;
16417  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16418  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16419  }
16420  else
16421 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16422  {
16423  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16424  requiresDedicatedAllocation = false;
16425  prefersDedicatedAllocation = false;
16426  }
16427 }
16428 
16429 void VmaAllocator_T::GetImageMemoryRequirements(
16430  VkImage hImage,
16431  VkMemoryRequirements& memReq,
16432  bool& requiresDedicatedAllocation,
16433  bool& prefersDedicatedAllocation) const
16434 {
16435 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16436  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16437  {
16438  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16439  memReqInfo.image = hImage;
16440 
16441  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16442 
16443  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16444  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16445 
16446  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16447 
16448  memReq = memReq2.memoryRequirements;
16449  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16450  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16451  }
16452  else
16453 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16454  {
16455  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16456  requiresDedicatedAllocation = false;
16457  prefersDedicatedAllocation = false;
16458  }
16459 }
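// Note: both functions above prefer the extended query
// (vkGet*MemoryRequirements2) when VK_KHR_dedicated_allocation or Vulkan 1.1
// is available, because only the extended path can report
// requires/prefersDedicatedAllocation; the plain Vulkan 1.0 query has to
// assume that no dedicated allocation is needed.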
16460 
16461 VkResult VmaAllocator_T::AllocateMemory(
16462  const VkMemoryRequirements& vkMemReq,
16463  bool requiresDedicatedAllocation,
16464  bool prefersDedicatedAllocation,
16465  VkBuffer dedicatedBuffer,
16466  VkBufferUsageFlags dedicatedBufferUsage,
16467  VkImage dedicatedImage,
16468  const VmaAllocationCreateInfo& createInfo,
16469  VmaSuballocationType suballocType,
16470  size_t allocationCount,
16471  VmaAllocation* pAllocations)
16472 {
16473  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16474 
16475  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16476 
16477  if(vkMemReq.size == 0)
16478  {
16479  return VK_ERROR_VALIDATION_FAILED_EXT;
16480  }
16481  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16482  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16483  {
16484  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16485  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16486  }
16487  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16488  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16489  {
16490  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16491  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16492  }
16493  if(requiresDedicatedAllocation)
16494  {
16495  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16496  {
16497  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16498  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16499  }
16500  if(createInfo.pool != VK_NULL_HANDLE)
16501  {
16502  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16503  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16504  }
16505  }
16506  if((createInfo.pool != VK_NULL_HANDLE) &&
16507  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16508  {
16509  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16510  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16511  }
16512 
16513  if(createInfo.pool != VK_NULL_HANDLE)
16514  {
16515  const VkDeviceSize alignmentForPool = VMA_MAX(
16516  vkMemReq.alignment,
16517  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
16518 
16519  VmaAllocationCreateInfo createInfoForPool = createInfo;
16520  // If memory type is not HOST_VISIBLE, disable MAPPED.
16521  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16522  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16523  {
16524  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16525  }
16526 
16527  return createInfo.pool->m_BlockVector.Allocate(
16528  m_CurrentFrameIndex.load(),
16529  vkMemReq.size,
16530  alignmentForPool,
16531  createInfoForPool,
16532  suballocType,
16533  allocationCount,
16534  pAllocations);
16535  }
16536  else
16537  {
16538  // Bit mask of Vulkan memory types acceptable for this allocation.
16539  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16540  uint32_t memTypeIndex = UINT32_MAX;
16541  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16542  if(res == VK_SUCCESS)
16543  {
16544  VkDeviceSize alignmentForMemType = VMA_MAX(
16545  vkMemReq.alignment,
16546  GetMemoryTypeMinAlignment(memTypeIndex));
16547 
16548  res = AllocateMemoryOfType(
16549  vkMemReq.size,
16550  alignmentForMemType,
16551  requiresDedicatedAllocation || prefersDedicatedAllocation,
16552  dedicatedBuffer,
16553  dedicatedBufferUsage,
16554  dedicatedImage,
16555  createInfo,
16556  memTypeIndex,
16557  suballocType,
16558  allocationCount,
16559  pAllocations);
16560  // Succeeded on first try.
16561  if(res == VK_SUCCESS)
16562  {
16563  return res;
16564  }
16565  // Allocation from this memory type failed. Try other compatible memory types.
16566  else
16567  {
16568  for(;;)
16569  {
16570  // Remove old memTypeIndex from list of possibilities.
16571  memoryTypeBits &= ~(1u << memTypeIndex);
16572  // Find alternative memTypeIndex.
16573  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16574  if(res == VK_SUCCESS)
16575  {
16576  alignmentForMemType = VMA_MAX(
16577  vkMemReq.alignment,
16578  GetMemoryTypeMinAlignment(memTypeIndex));
16579 
16580  res = AllocateMemoryOfType(
16581  vkMemReq.size,
16582  alignmentForMemType,
16583  requiresDedicatedAllocation || prefersDedicatedAllocation,
16584  dedicatedBuffer,
16585  dedicatedBufferUsage,
16586  dedicatedImage,
16587  createInfo,
16588  memTypeIndex,
16589  suballocType,
16590  allocationCount,
16591  pAllocations);
16592  // Allocation from this alternative memory type succeeded.
16593  if(res == VK_SUCCESS)
16594  {
16595  return res;
16596  }
16597  // else: Allocation from this memory type failed. Try next one - next loop iteration.
16598  }
16599  // No other matching memory type index could be found.
16600  else
16601  {
16602  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
16603  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16604  }
16605  }
16606  }
16607  }
16608  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
16609  else
16610  return res;
16611  }
16612 }
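// Note on the fallback loop above: after vmaFindMemoryTypeIndex() picks the
// best candidate, each failed allocation clears that type's bit from
// memoryTypeBits and the search repeats, so every compatible memory type is
// tried in decreasing order of preference before
// VK_ERROR_OUT_OF_DEVICE_MEMORY is finally returned.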
16613 
16614 void VmaAllocator_T::FreeMemory(
16615  size_t allocationCount,
16616  const VmaAllocation* pAllocations)
16617 {
16618  VMA_ASSERT(pAllocations);
16619 
16620  for(size_t allocIndex = allocationCount; allocIndex--; )
16621  {
16622  VmaAllocation allocation = pAllocations[allocIndex];
16623 
16624  if(allocation != VK_NULL_HANDLE)
16625  {
16626  if(TouchAllocation(allocation))
16627  {
16628  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16629  {
16630  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
16631  }
16632 
16633  switch(allocation->GetType())
16634  {
16635  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16636  {
16637  VmaBlockVector* pBlockVector = VMA_NULL;
16638  VmaPool hPool = allocation->GetBlock()->GetParentPool();
16639  if(hPool != VK_NULL_HANDLE)
16640  {
16641  pBlockVector = &hPool->m_BlockVector;
16642  }
16643  else
16644  {
16645  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16646  pBlockVector = m_pBlockVectors[memTypeIndex];
16647  }
16648  pBlockVector->Free(allocation);
16649  }
16650  break;
16651  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16652  FreeDedicatedMemory(allocation);
16653  break;
16654  default:
16655  VMA_ASSERT(0);
16656  }
16657  }
16658 
16659  // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
16660  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16661  allocation->SetUserData(this, VMA_NULL);
16662  m_AllocationObjectAllocator.Free(allocation);
16663  }
16664  }
16665 }
16666 
16667 VkResult VmaAllocator_T::ResizeAllocation(
16668  const VmaAllocation alloc,
16669  VkDeviceSize newSize)
16670 {
16671  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16672  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16673  {
16674  return VK_ERROR_VALIDATION_FAILED_EXT;
16675  }
16676  if(newSize == alloc->GetSize())
16677  {
16678  return VK_SUCCESS;
16679  }
16680  return VK_ERROR_OUT_OF_POOL_MEMORY;
16681 }
16682 
16683 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16684 {
16685  // Initialize.
16686  InitStatInfo(pStats->total);
16687  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16688  InitStatInfo(pStats->memoryType[i]);
16689  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16690  InitStatInfo(pStats->memoryHeap[i]);
16691 
16692  // Process default pools.
16693  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16694  {
16695  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16696  VMA_ASSERT(pBlockVector);
16697  pBlockVector->AddStats(pStats);
16698  }
16699 
16700  // Process custom pools.
16701  {
16702  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16703  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16704  {
16705  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16706  }
16707  }
16708 
16709  // Process dedicated allocations.
16710  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16711  {
16712  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16713  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16714  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16715  VMA_ASSERT(pDedicatedAllocVector);
16716  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16717  {
16718  VmaStatInfo allocationStatInfo;
16719  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16720  VmaAddStatInfo(pStats->total, allocationStatInfo);
16721  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16722  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16723  }
16724  }
16725 
16726  // Postprocess.
16727  VmaPostprocessCalcStatInfo(pStats->total);
16728  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16729  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16730  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16731  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16732 }
16733 
16734 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
16735 {
16736 #if VMA_MEMORY_BUDGET
16737  if(m_UseExtMemoryBudget)
16738  {
16739  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
16740  {
16741  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
16742  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16743  {
16744  const uint32_t heapIndex = firstHeap + i;
16745 
16746  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16747  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16748 
16749  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
16750  {
16751  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
16752  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16753  }
16754  else
16755  {
16756  outBudget->usage = 0;
16757  }
16758 
16759  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
16760  outBudget->budget = VMA_MIN(
16761  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
16762  }
16763  }
16764  else
16765  {
16766  UpdateVulkanBudget(); // Outside of mutex lock
16767  GetBudget(outBudget, firstHeap, heapCount); // Recursion
16768  }
16769  }
16770  else
16771 #endif
16772  {
16773  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16774  {
16775  const uint32_t heapIndex = firstHeap + i;
16776 
16777  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16778  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16779 
16780  outBudget->usage = outBudget->blockBytes;
16781  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
16782  }
16783  }
16784 }
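// Worked example of the budget path above, assuming VK_EXT_memory_budget is
// in use: if the last budget fetch reported usage = 900 MiB while VMA's own
// blocks held 300 MiB (m_BlockBytesAtBudgetFetch), and those blocks have
// since grown to 350 MiB, the estimate is 900 + (350 - 300) = 950 MiB.
// Without the extension, the budget is simply assumed to be 80% of the heap
// size. The cached numbers are refreshed after roughly 30 allocation/free
// operations.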
16785 
16786 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
16787 
16788 VkResult VmaAllocator_T::DefragmentationBegin(
16789  const VmaDefragmentationInfo2& info,
16790  VmaDefragmentationStats* pStats,
16791  VmaDefragmentationContext* pContext)
16792 {
16793  if(info.pAllocationsChanged != VMA_NULL)
16794  {
16795  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
16796  }
16797 
16798  *pContext = vma_new(this, VmaDefragmentationContext_T)(
16799  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16800 
16801  (*pContext)->AddPools(info.poolCount, info.pPools);
16802  (*pContext)->AddAllocations(
16803  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16804 
16805  VkResult res = (*pContext)->Defragment(
16806  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16807  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16808  info.commandBuffer, pStats, info.flags);
16809 
16810  if(res != VK_NOT_READY)
16811  {
16812  vma_delete(this, *pContext);
16813  *pContext = VMA_NULL;
16814  }
16815 
16816  return res;
16817 }
16818 
16819 VkResult VmaAllocator_T::DefragmentationEnd(
16820  VmaDefragmentationContext context)
16821 {
16822  vma_delete(this, context);
16823  return VK_SUCCESS;
16824 }
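// Usage sketch (illustrative): driving DefragmentationBegin/End through the public
// vmaDefragmentationBegin()/vmaDefragmentationEnd() wrappers. `allocator`, `allocs`,
// and `allocCount` are assumed to exist; only CPU-side moves are requested here.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
// With CPU-only moves VK_SUCCESS is expected; VK_NOT_READY would mean GPU work was
// recorded into defragInfo.commandBuffer and must complete before ending.
vmaDefragmentationEnd(allocator, defragCtx);
*/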
16825 
16826 VkResult VmaAllocator_T::DefragmentationPassBegin(
16827  VmaDefragmentationPassInfo* pInfo,
16828  VmaDefragmentationContext context)
16829 {
16830  return context->DefragmentPassBegin(pInfo);
16831 }
16832 VkResult VmaAllocator_T::DefragmentationPassEnd(
16833  VmaDefragmentationContext context)
16834 {
16835  return context->DefragmentPassEnd();
16836 
16837 }
16838 
16839 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16840 {
16841  if(hAllocation->CanBecomeLost())
16842  {
16843  /*
16844  Warning: This is a carefully designed algorithm.
16845  Do not modify unless you really know what you're doing :)
16846  */
16847  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16848  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16849  for(;;)
16850  {
16851  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16852  {
16853  pAllocationInfo->memoryType = UINT32_MAX;
16854  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16855  pAllocationInfo->offset = 0;
16856  pAllocationInfo->size = hAllocation->GetSize();
16857  pAllocationInfo->pMappedData = VMA_NULL;
16858  pAllocationInfo->pUserData = hAllocation->GetUserData();
16859  return;
16860  }
16861  else if(localLastUseFrameIndex == localCurrFrameIndex)
16862  {
16863  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16864  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16865  pAllocationInfo->offset = hAllocation->GetOffset();
16866  pAllocationInfo->size = hAllocation->GetSize();
16867  pAllocationInfo->pMappedData = VMA_NULL;
16868  pAllocationInfo->pUserData = hAllocation->GetUserData();
16869  return;
16870  }
16871  else // Last use time earlier than current time.
16872  {
16873  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16874  {
16875  localLastUseFrameIndex = localCurrFrameIndex;
16876  }
16877  }
16878  }
16879  }
16880  else
16881  {
16882 #if VMA_STATS_STRING_ENABLED
16883  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16884  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16885  for(;;)
16886  {
16887  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16888  if(localLastUseFrameIndex == localCurrFrameIndex)
16889  {
16890  break;
16891  }
16892  else // Last use time earlier than current time.
16893  {
16894  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16895  {
16896  localLastUseFrameIndex = localCurrFrameIndex;
16897  }
16898  }
16899  }
16900 #endif
16901 
16902  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16903  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16904  pAllocationInfo->offset = hAllocation->GetOffset();
16905  pAllocationInfo->size = hAllocation->GetSize();
16906  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16907  pAllocationInfo->pUserData = hAllocation->GetUserData();
16908  }
16909 }
16910 
16911 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16912 {
16913  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16914  if(hAllocation->CanBecomeLost())
16915  {
16916  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16917  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16918  for(;;)
16919  {
16920  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16921  {
16922  return false;
16923  }
16924  else if(localLastUseFrameIndex == localCurrFrameIndex)
16925  {
16926  return true;
16927  }
16928  else // Last use time earlier than current time.
16929  {
16930  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16931  {
16932  localLastUseFrameIndex = localCurrFrameIndex;
16933  }
16934  }
16935  }
16936  }
16937  else
16938  {
16939 #if VMA_STATS_STRING_ENABLED
16940  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16941  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16942  for(;;)
16943  {
16944  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16945  if(localLastUseFrameIndex == localCurrFrameIndex)
16946  {
16947  break;
16948  }
16949  else // Last use time earlier than current time.
16950  {
16951  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16952  {
16953  localLastUseFrameIndex = localCurrFrameIndex;
16954  }
16955  }
16956  }
16957 #endif
16958 
16959  return true;
16960  }
16961 }
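// Usage sketch (illustrative): the per-frame pattern these functions support for
// allocations created with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT, via the
// public wrapper vmaTouchAllocation(); `allocator` and `allocation` are assumed valid.
/*
if(vmaTouchAllocation(allocator, allocation) == VK_FALSE)
{
    // The allocation is lost: destroy the resource bound to it and recreate both.
}
else
{
    // Safe to use this frame; the last-use frame index was bumped atomically.
}
*/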
16962 
16963 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16964 {
16965  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16966 
16967  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16968 
16969  if(newCreateInfo.maxBlockCount == 0)
16970  {
16971  newCreateInfo.maxBlockCount = SIZE_MAX;
16972  }
16973  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16974  {
16975  return VK_ERROR_INITIALIZATION_FAILED;
16976  }
16977  // Memory type index out of range or forbidden.
16978  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16979  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16980  {
16981  return VK_ERROR_FEATURE_NOT_PRESENT;
16982  }
16983 
16984  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16985 
16986  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16987 
16988  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16989  if(res != VK_SUCCESS)
16990  {
16991  vma_delete(this, *pPool);
16992  *pPool = VMA_NULL;
16993  return res;
16994  }
16995 
16996  // Add to m_Pools.
16997  {
16998  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16999  (*pPool)->SetId(m_NextPoolId++);
17000  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
17001  }
17002 
17003  return VK_SUCCESS;
17004 }
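// Usage sketch (illustrative): creating and destroying a custom pool through the
// public wrappers; `allocator` is assumed valid and `myMemTypeIndex` would come
// from vmaFindMemoryTypeIndex() or a related helper.
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = myMemTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // 64 MiB per block.
poolCreateInfo.maxBlockCount = 2;               // 0 would mean SIZE_MAX, as above.

VmaPool pool = VK_NULL_HANDLE;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);
*/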
17005 
17006 void VmaAllocator_T::DestroyPool(VmaPool pool)
17007 {
17008  // Remove from m_Pools.
17009  {
17010  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
17011  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
17012  VMA_ASSERT(success && "Pool not found in Allocator.");
17013  }
17014 
17015  vma_delete(this, pool);
17016 }
17017 
17018 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
17019 {
17020  pool->m_BlockVector.GetPoolStats(pPoolStats);
17021 }
17022 
17023 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
17024 {
17025  m_CurrentFrameIndex.store(frameIndex);
17026 
17027 #if VMA_MEMORY_BUDGET
17028  if(m_UseExtMemoryBudget)
17029  {
17030  UpdateVulkanBudget();
17031  }
17032 #endif // #if VMA_MEMORY_BUDGET
17033 }
17034 
17035 void VmaAllocator_T::MakePoolAllocationsLost(
17036  VmaPool hPool,
17037  size_t* pLostAllocationCount)
17038 {
17039  hPool->m_BlockVector.MakePoolAllocationsLost(
17040  m_CurrentFrameIndex.load(),
17041  pLostAllocationCount);
17042 }
17043 
17044 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
17045 {
17046  return hPool->m_BlockVector.CheckCorruption();
17047 }
17048 
17049 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
17050 {
17051  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
17052 
17053  // Process default pools.
17054  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17055  {
17056  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
17057  {
17058  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17059  VMA_ASSERT(pBlockVector);
17060  VkResult localRes = pBlockVector->CheckCorruption();
17061  switch(localRes)
17062  {
17063  case VK_ERROR_FEATURE_NOT_PRESENT:
17064  break;
17065  case VK_SUCCESS:
17066  finalRes = VK_SUCCESS;
17067  break;
17068  default:
17069  return localRes;
17070  }
17071  }
17072  }
17073 
17074  // Process custom pools.
17075  {
17076  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17077  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
17078  {
17079  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
17080  {
17081  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
17082  switch(localRes)
17083  {
17084  case VK_ERROR_FEATURE_NOT_PRESENT:
17085  break;
17086  case VK_SUCCESS:
17087  finalRes = VK_SUCCESS;
17088  break;
17089  default:
17090  return localRes;
17091  }
17092  }
17093  }
17094  }
17095 
17096  return finalRes;
17097 }
17098 
17099 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
17100 {
17101  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
17102  (*pAllocation)->InitLost();
17103 }
17104 
17105 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
17106 {
17107  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
17108 
17109  // HeapSizeLimit is in effect for this heap.
17110  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
17111  {
17112  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
17113  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
17114  for(;;)
17115  {
17116  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
17117  if(blockBytesAfterAllocation > heapSize)
17118  {
17119  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17120  }
17121  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
17122  {
17123  break;
17124  }
17125  }
17126  }
17127  else
17128  {
17129  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
17130  }
17131 
17132  // VULKAN CALL vkAllocateMemory.
17133  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
17134 
17135  if(res == VK_SUCCESS)
17136  {
17137 #if VMA_MEMORY_BUDGET
17138  ++m_Budget.m_OperationsSinceBudgetFetch;
17139 #endif
17140 
17141  // Informative callback.
17142  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
17143  {
17144  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
17145  }
17146  }
17147  else
17148  {
17149  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
17150  }
17151 
17152  return res;
17153 }
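// The compare_exchange_strong loop above is an optimistic reservation against the
// heap size limit: a sketch of the same pattern in isolation (assumed names, not
// library code).
/*
#include <atomic>
#include <cstdint>

bool TryReserve(std::atomic<uint64_t>& total, uint64_t limit, uint64_t bytes)
{
    uint64_t cur = total.load();
    for(;;)
    {
        if(cur + bytes > limit)
            return false; // Reservation would exceed the limit.
        if(total.compare_exchange_strong(cur, cur + bytes))
            return true;  // No concurrent change; reservation committed.
        // On failure, cur was reloaded with the current value; retry.
    }
}
*/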
17154 
17155 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
17156 {
17157  // Informative callback.
17158  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
17159  {
17160  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
17161  }
17162 
17163  // VULKAN CALL vkFreeMemory.
17164  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
17165 
17166  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
17167 }
17168 
17169 VkResult VmaAllocator_T::BindVulkanBuffer(
17170  VkDeviceMemory memory,
17171  VkDeviceSize memoryOffset,
17172  VkBuffer buffer,
17173  const void* pNext)
17174 {
17175  if(pNext != VMA_NULL)
17176  {
17177 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17178  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17179  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
17180  {
17181  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
17182  bindBufferMemoryInfo.pNext = pNext;
17183  bindBufferMemoryInfo.buffer = buffer;
17184  bindBufferMemoryInfo.memory = memory;
17185  bindBufferMemoryInfo.memoryOffset = memoryOffset;
17186  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
17187  }
17188  else
17189 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17190  {
17191  return VK_ERROR_EXTENSION_NOT_PRESENT;
17192  }
17193  }
17194  else
17195  {
17196  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
17197  }
17198 }
17199 
17200 VkResult VmaAllocator_T::BindVulkanImage(
17201  VkDeviceMemory memory,
17202  VkDeviceSize memoryOffset,
17203  VkImage image,
17204  const void* pNext)
17205 {
17206  if(pNext != VMA_NULL)
17207  {
17208 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17209  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17210  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
17211  {
17212  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
17213  bindImageMemoryInfo.pNext = pNext;
17214  bindImageMemoryInfo.image = image;
17215  bindImageMemoryInfo.memory = memory;
17216  bindImageMemoryInfo.memoryOffset = memoryOffset;
17217  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
17218  }
17219  else
17220 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17221  {
17222  return VK_ERROR_EXTENSION_NOT_PRESENT;
17223  }
17224  }
17225  else
17226  {
17227  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17228  }
17229 }
17230 
17231 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17232 {
17233  if(hAllocation->CanBecomeLost())
17234  {
17235  return VK_ERROR_MEMORY_MAP_FAILED;
17236  }
17237 
17238  switch(hAllocation->GetType())
17239  {
17240  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17241  {
17242  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17243  char *pBytes = VMA_NULL;
17244  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17245  if(res == VK_SUCCESS)
17246  {
17247  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17248  hAllocation->BlockAllocMap();
17249  }
17250  return res;
17251  }
17252  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17253  return hAllocation->DedicatedAllocMap(this, ppData);
17254  default:
17255  VMA_ASSERT(0);
17256  return VK_ERROR_MEMORY_MAP_FAILED;
17257  }
17258 }
17259 
17260 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17261 {
17262  switch(hAllocation->GetType())
17263  {
17264  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17265  {
17266  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17267  hAllocation->BlockAllocUnmap();
17268  pBlock->Unmap(this, 1);
17269  }
17270  break;
17271  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17272  hAllocation->DedicatedAllocUnmap(this);
17273  break;
17274  default:
17275  VMA_ASSERT(0);
17276  }
17277 }
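// Usage sketch (illustrative): the public vmaMapMemory()/vmaUnmapMemory() pair built
// on Map()/Unmap() above; assumes `allocation` lives in HOST_VISIBLE memory and
// `srcData`/`srcSize` are caller-provided.
/*
void* pData = nullptr;
if(vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
{
    memcpy(pData, srcData, srcSize);
    vmaUnmapMemory(allocator, allocation);
    // For non-HOST_COHERENT memory types, also call vmaFlushAllocation().
}
*/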
17278 
17279 VkResult VmaAllocator_T::BindBufferMemory(
17280  VmaAllocation hAllocation,
17281  VkDeviceSize allocationLocalOffset,
17282  VkBuffer hBuffer,
17283  const void* pNext)
17284 {
17285  VkResult res = VK_SUCCESS;
17286  switch(hAllocation->GetType())
17287  {
17288  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17289  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17290  break;
17291  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17292  {
17293  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17294  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17295  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17296  break;
17297  }
17298  default:
17299  VMA_ASSERT(0);
17300  }
17301  return res;
17302 }
17303 
17304 VkResult VmaAllocator_T::BindImageMemory(
17305  VmaAllocation hAllocation,
17306  VkDeviceSize allocationLocalOffset,
17307  VkImage hImage,
17308  const void* pNext)
17309 {
17310  VkResult res = VK_SUCCESS;
17311  switch(hAllocation->GetType())
17312  {
17313  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17314  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17315  break;
17316  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17317  {
17318  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17319  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17320  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17321  break;
17322  }
17323  default:
17324  VMA_ASSERT(0);
17325  }
17326  return res;
17327 }
17328 
17329 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17330  VmaAllocation hAllocation,
17331  VkDeviceSize offset, VkDeviceSize size,
17332  VMA_CACHE_OPERATION op)
17333 {
17334  VkResult res = VK_SUCCESS;
17335 
17336  VkMappedMemoryRange memRange = {};
17337  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17338  {
17339  switch(op)
17340  {
17341  case VMA_CACHE_FLUSH:
17342  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17343  break;
17344  case VMA_CACHE_INVALIDATE:
17345  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17346  break;
17347  default:
17348  VMA_ASSERT(0);
17349  }
17350  }
17351  // else: Just ignore this call.
17352  return res;
17353 }
17354 
17355 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17356  uint32_t allocationCount,
17357  const VmaAllocation* allocations,
17358  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17359  VMA_CACHE_OPERATION op)
17360 {
17361  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17362  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17363  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17364 
17365  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17366  {
17367  const VmaAllocation alloc = allocations[allocIndex];
17368  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17369  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17370  VkMappedMemoryRange newRange;
17371  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17372  {
17373  ranges.push_back(newRange);
17374  }
17375  }
17376 
17377  VkResult res = VK_SUCCESS;
17378  if(!ranges.empty())
17379  {
17380  switch(op)
17381  {
17382  case VMA_CACHE_FLUSH:
17383  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17384  break;
17385  case VMA_CACHE_INVALIDATE:
17386  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17387  break;
17388  default:
17389  VMA_ASSERT(0);
17390  }
17391  }
17392  // else: Just ignore this call.
17393  return res;
17394 }
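// Usage sketch (illustrative): flushing several allocations in one call through the
// public wrapper vmaFlushAllocations(); null offsets/sizes mean 0 and VK_WHOLE_SIZE
// per allocation, as handled above. `allocator` and a0..a2 are assumed valid.
/*
VmaAllocation allocs[3] = { a0, a1, a2 };
VkResult res = vmaFlushAllocations(allocator, 3, allocs, nullptr, nullptr);
*/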
17395 
17396 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17397 {
17398  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17399 
17400  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17401  {
17402  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17403  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
17404  VMA_ASSERT(pDedicatedAllocations);
17405  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
17406  VMA_ASSERT(success);
17407  }
17408 
17409  VkDeviceMemory hMemory = allocation->GetMemory();
17410 
17411  /*
17412 There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
17413 before vkFreeMemory.
17414 
17415  if(allocation->GetMappedData() != VMA_NULL)
17416  {
17417  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17418  }
17419  */
17420 
17421  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17422 
17423  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17424 }
17425 
17426 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17427 {
17428  VkBufferCreateInfo dummyBufCreateInfo;
17429  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17430 
17431  uint32_t memoryTypeBits = 0;
17432 
17433  // Create buffer.
17434  VkBuffer buf = VK_NULL_HANDLE;
17435  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17436  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17437  if(res == VK_SUCCESS)
17438  {
17439  // Query for supported memory types.
17440  VkMemoryRequirements memReq;
17441  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17442  memoryTypeBits = memReq.memoryTypeBits;
17443 
17444  // Destroy buffer.
17445  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17446  }
17447 
17448  return memoryTypeBits;
17449 }
17450 
17451 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17452 {
17453  // Make sure memory information is already fetched.
17454  VMA_ASSERT(GetMemoryTypeCount() > 0);
17455 
17456  uint32_t memoryTypeBits = UINT32_MAX;
17457 
17458  if(!m_UseAmdDeviceCoherentMemory)
17459  {
17460  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17461  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17462  {
17463  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17464  {
17465  memoryTypeBits &= ~(1u << memTypeIndex);
17466  }
17467  }
17468  }
17469 
17470  return memoryTypeBits;
17471 }
17472 
17473 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17474  VmaAllocation allocation,
17475  VkDeviceSize offset, VkDeviceSize size,
17476  VkMappedMemoryRange& outRange) const
17477 {
17478  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17479  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17480  {
17481  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17482  const VkDeviceSize allocationSize = allocation->GetSize();
17483  VMA_ASSERT(offset <= allocationSize);
17484 
17485  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17486  outRange.pNext = VMA_NULL;
17487  outRange.memory = allocation->GetMemory();
17488 
17489  switch(allocation->GetType())
17490  {
17491  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17492  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17493  if(size == VK_WHOLE_SIZE)
17494  {
17495  outRange.size = allocationSize - outRange.offset;
17496  }
17497  else
17498  {
17499  VMA_ASSERT(offset + size <= allocationSize);
17500  outRange.size = VMA_MIN(
17501  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17502  allocationSize - outRange.offset);
17503  }
17504  break;
17505  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17506  {
17507  // 1. Still within this allocation.
17508  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17509  if(size == VK_WHOLE_SIZE)
17510  {
17511  size = allocationSize - offset;
17512  }
17513  else
17514  {
17515  VMA_ASSERT(offset + size <= allocationSize);
17516  }
17517  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17518 
17519  // 2. Adjust to whole block.
17520  const VkDeviceSize allocationOffset = allocation->GetOffset();
17521  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17522  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17523  outRange.offset += allocationOffset;
17524  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17525 
17526  break;
17527  }
17528  default:
17529  VMA_ASSERT(0);
17530  }
17531  return true;
17532  }
17533  return false;
17534 }
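// Worked example of the alignment above (illustrative): with nonCoherentAtomSize = 64,
// offset = 70, size = 60 on a dedicated allocation of size 256:
//   outRange.offset = VmaAlignDown(70, 64) = 64
//   outRange.size   = VmaAlignUp(60 + (70 - 64), 64) = VmaAlignUp(66, 64) = 128
// so the range [64, 192) covers the requested bytes [70, 130) in whole atoms.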
17535 
17536 #if VMA_MEMORY_BUDGET
17537 
17538 void VmaAllocator_T::UpdateVulkanBudget()
17539 {
17540  VMA_ASSERT(m_UseExtMemoryBudget);
17541 
17542  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
17543 
17544  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
17545  VmaPnextChainPushFront(&memProps, &budgetProps);
17546 
17547  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
17548 
17549  {
17550  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
17551 
17552  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
17553  {
17554  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
17555  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
17556  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
17557 
17558  // Some buggy drivers return an incorrect budget, e.g. 0 or one much larger than the heap size.
17559  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
17560  {
17561  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17562  }
17563  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
17564  {
17565  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
17566  }
17567  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
17568  {
17569  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17570  }
17571  }
17572  m_Budget.m_OperationsSinceBudgetFetch = 0;
17573  }
17574 }
17575 
17576 #endif // #if VMA_MEMORY_BUDGET
17577 
17578 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
17579 {
17580  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
17581  !hAllocation->CanBecomeLost() &&
17582  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17583  {
17584  void* pData = VMA_NULL;
17585  VkResult res = Map(hAllocation, &pData);
17586  if(res == VK_SUCCESS)
17587  {
17588  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
17589  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
17590  Unmap(hAllocation);
17591  }
17592  else
17593  {
17594  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
17595  }
17596  }
17597 }
17598 
17599 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
17600 {
17601  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
17602  if(memoryTypeBits == UINT32_MAX)
17603  {
17604  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
17605  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
17606  }
17607  return memoryTypeBits;
17608 }
17609 
17610 #if VMA_STATS_STRING_ENABLED
17611 
17612 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
17613 {
17614  bool dedicatedAllocationsStarted = false;
17615  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17616  {
17617  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17618  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
17619  VMA_ASSERT(pDedicatedAllocVector);
17620  if(pDedicatedAllocVector->empty() == false)
17621  {
17622  if(dedicatedAllocationsStarted == false)
17623  {
17624  dedicatedAllocationsStarted = true;
17625  json.WriteString("DedicatedAllocations");
17626  json.BeginObject();
17627  }
17628 
17629  json.BeginString("Type ");
17630  json.ContinueString(memTypeIndex);
17631  json.EndString();
17632 
17633  json.BeginArray();
17634 
17635  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
17636  {
17637  json.BeginObject(true);
17638  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
17639  hAlloc->PrintParameters(json);
17640  json.EndObject();
17641  }
17642 
17643  json.EndArray();
17644  }
17645  }
17646  if(dedicatedAllocationsStarted)
17647  {
17648  json.EndObject();
17649  }
17650 
17651  {
17652  bool allocationsStarted = false;
17653  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17654  {
17655  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
17656  {
17657  if(allocationsStarted == false)
17658  {
17659  allocationsStarted = true;
17660  json.WriteString("DefaultPools");
17661  json.BeginObject();
17662  }
17663 
17664  json.BeginString("Type ");
17665  json.ContinueString(memTypeIndex);
17666  json.EndString();
17667 
17668  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
17669  }
17670  }
17671  if(allocationsStarted)
17672  {
17673  json.EndObject();
17674  }
17675  }
17676 
17677  // Custom pools
17678  {
17679  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17680  const size_t poolCount = m_Pools.size();
17681  if(poolCount > 0)
17682  {
17683  json.WriteString("Pools");
17684  json.BeginObject();
17685  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
17686  {
17687  json.BeginString();
17688  json.ContinueString(m_Pools[poolIndex]->GetId());
17689  json.EndString();
17690 
17691  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
17692  }
17693  json.EndObject();
17694  }
17695  }
17696 }
17697 
17698 #endif // #if VMA_STATS_STRING_ENABLED
17699 
17700 //////////////////////////////////////////////////////////////////////////////////
17701 // Public interface
17702 
17703 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
17704  const VmaAllocatorCreateInfo* pCreateInfo,
17705  VmaAllocator* pAllocator)
17706 {
17707  VMA_ASSERT(pCreateInfo && pAllocator);
17708  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
17709  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
17710  VMA_DEBUG_LOG("vmaCreateAllocator");
17711  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
17712  return (*pAllocator)->Init(pCreateInfo);
17713 }
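// Usage sketch (illustrative): typical allocator creation, assuming `instance`,
// `physicalDevice`, and `device` are valid Vulkan handles created by the application.
/*
VmaAllocatorCreateInfo allocatorInfo = {};
allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_1;
allocatorInfo.instance = instance;
allocatorInfo.physicalDevice = physicalDevice;
allocatorInfo.device = device;

VmaAllocator allocator = VK_NULL_HANDLE;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
// ... use the allocator ...
vmaDestroyAllocator(allocator);
*/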
17714 
17715 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17716  VmaAllocator allocator)
17717 {
17718  if(allocator != VK_NULL_HANDLE)
17719  {
17720  VMA_DEBUG_LOG("vmaDestroyAllocator");
17721  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17722  vma_delete(&allocationCallbacks, allocator);
17723  }
17724 }
17725 
17726 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
17727 {
17728  VMA_ASSERT(allocator && pAllocatorInfo);
17729  pAllocatorInfo->instance = allocator->m_hInstance;
17730  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
17731  pAllocatorInfo->device = allocator->m_hDevice;
17732 }
17733 
17734 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
17735  VmaAllocator allocator,
17736  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
17737 {
17738  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
17739  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
17740 }
17741 
17742 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
17743  VmaAllocator allocator,
17744  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
17745 {
17746  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
17747  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
17748 }
17749 
17750 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
17751  VmaAllocator allocator,
17752  uint32_t memoryTypeIndex,
17753  VkMemoryPropertyFlags* pFlags)
17754 {
17755  VMA_ASSERT(allocator && pFlags);
17756  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
17757  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
17758 }
17759 
17760 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
17761  VmaAllocator allocator,
17762  uint32_t frameIndex)
17763 {
17764  VMA_ASSERT(allocator);
17765  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17766 
17767  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17768 
17769  allocator->SetCurrentFrameIndex(frameIndex);
17770 }
17771 
17772 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
17773  VmaAllocator allocator,
17774  VmaStats* pStats)
17775 {
17776  VMA_ASSERT(allocator && pStats);
17777  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17778  allocator->CalculateStats(pStats);
17779 }
17780 
17781 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
17782  VmaAllocator allocator,
17783  VmaBudget* pBudget)
17784 {
17785  VMA_ASSERT(allocator && pBudget);
17786  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17787  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
17788 }
17789 
17790 #if VMA_STATS_STRING_ENABLED
17791 
17792 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17793  VmaAllocator allocator,
17794  char** ppStatsString,
17795  VkBool32 detailedMap)
17796 {
17797  VMA_ASSERT(allocator && ppStatsString);
17798  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17799 
17800  VmaStringBuilder sb(allocator);
17801  {
17802  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17803  json.BeginObject();
17804 
17805  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17806  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17807 
17808  VmaStats stats;
17809  allocator->CalculateStats(&stats);
17810 
17811  json.WriteString("Total");
17812  VmaPrintStatInfo(json, stats.total);
17813 
17814  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17815  {
17816  json.BeginString("Heap ");
17817  json.ContinueString(heapIndex);
17818  json.EndString();
17819  json.BeginObject();
17820 
17821  json.WriteString("Size");
17822  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17823 
17824  json.WriteString("Flags");
17825  json.BeginArray(true);
17826  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17827  {
17828  json.WriteString("DEVICE_LOCAL");
17829  }
17830  json.EndArray();
17831 
17832  json.WriteString("Budget");
17833  json.BeginObject();
17834  {
17835  json.WriteString("BlockBytes");
17836  json.WriteNumber(budget[heapIndex].blockBytes);
17837  json.WriteString("AllocationBytes");
17838  json.WriteNumber(budget[heapIndex].allocationBytes);
17839  json.WriteString("Usage");
17840  json.WriteNumber(budget[heapIndex].usage);
17841  json.WriteString("Budget");
17842  json.WriteNumber(budget[heapIndex].budget);
17843  }
17844  json.EndObject();
17845 
17846  if(stats.memoryHeap[heapIndex].blockCount > 0)
17847  {
17848  json.WriteString("Stats");
17849  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17850  }
17851 
17852  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17853  {
17854  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17855  {
17856  json.BeginString("Type ");
17857  json.ContinueString(typeIndex);
17858  json.EndString();
17859 
17860  json.BeginObject();
17861 
17862  json.WriteString("Flags");
17863  json.BeginArray(true);
17864  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17865  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17866  {
17867  json.WriteString("DEVICE_LOCAL");
17868  }
17869  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17870  {
17871  json.WriteString("HOST_VISIBLE");
17872  }
17873  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17874  {
17875  json.WriteString("HOST_COHERENT");
17876  }
17877  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17878  {
17879  json.WriteString("HOST_CACHED");
17880  }
17881  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17882  {
17883  json.WriteString("LAZILY_ALLOCATED");
17884  }
17885  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17886  {
17887  json.WriteString(" PROTECTED");
17888  }
17889  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17890  {
17891  json.WriteString(" DEVICE_COHERENT");
17892  }
17893  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17894  {
17895  json.WriteString(" DEVICE_UNCACHED");
17896  }
17897  json.EndArray();
17898 
17899  if(stats.memoryType[typeIndex].blockCount > 0)
17900  {
17901  json.WriteString("Stats");
17902  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17903  }
17904 
17905  json.EndObject();
17906  }
17907  }
17908 
17909  json.EndObject();
17910  }
17911  if(detailedMap == VK_TRUE)
17912  {
17913  allocator->PrintDetailedMap(json);
17914  }
17915 
17916  json.EndObject();
17917  }
17918 
17919  const size_t len = sb.GetLength();
17920  char* const pChars = vma_new_array(allocator, char, len + 1);
17921  if(len > 0)
17922  {
17923  memcpy(pChars, sb.GetData(), len);
17924  }
17925  pChars[len] = '\0';
17926  *ppStatsString = pChars;
17927 }
17928 
17929 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17930  VmaAllocator allocator,
17931  char* pStatsString)
17932 {
17933  if(pStatsString != VMA_NULL)
17934  {
17935  VMA_ASSERT(allocator);
17936  size_t len = strlen(pStatsString);
17937  vma_delete_array(allocator, pStatsString, len + 1);
17938  }
17939 }
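// Usage sketch (illustrative): building, printing, and freeing the JSON stats dump;
// `allocator` is assumed valid and printf stands in for any logging facility.
/*
char* statsString = nullptr;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map.
printf("%s\n", statsString);
vmaFreeStatsString(allocator, statsString);
*/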
17940 
17941 #endif // #if VMA_STATS_STRING_ENABLED
17942 
17943 /*
17944 This function is not protected by any mutex because it just reads immutable data.
17945 */
17946 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17947  VmaAllocator allocator,
17948  uint32_t memoryTypeBits,
17949  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17950  uint32_t* pMemoryTypeIndex)
17951 {
17952  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17953  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17954  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17955 
17956  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17957 
17958  if(pAllocationCreateInfo->memoryTypeBits != 0)
17959  {
17960  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17961  }
17962 
17963  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17964  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17965  uint32_t notPreferredFlags = 0;
17966 
17967  // Convert usage to requiredFlags and preferredFlags.
17968  switch(pAllocationCreateInfo->usage)
17969  {
17970  case VMA_MEMORY_USAGE_UNKNOWN:
17971  break;
17972  case VMA_MEMORY_USAGE_GPU_ONLY:
17973  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17974  {
17975  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17976  }
17977  break;
17978  case VMA_MEMORY_USAGE_CPU_ONLY:
17979  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17980  break;
17981  case VMA_MEMORY_USAGE_CPU_TO_GPU:
17982  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17983  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17984  {
17985  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17986  }
17987  break;
17988  case VMA_MEMORY_USAGE_GPU_TO_CPU:
17989  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17990  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17991  break;
17992  case VMA_MEMORY_USAGE_CPU_COPY:
17993  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17994  break;
17995  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
17996  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17997  break;
17998  default:
17999  VMA_ASSERT(0);
18000  break;
18001  }
18002 
18003  // Avoid DEVICE_COHERENT unless explicitly requested.
18004  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
18005  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
18006  {
18007  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
18008  }
18009 
18010  *pMemoryTypeIndex = UINT32_MAX;
18011  uint32_t minCost = UINT32_MAX;
18012  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
18013  memTypeIndex < allocator->GetMemoryTypeCount();
18014  ++memTypeIndex, memTypeBit <<= 1)
18015  {
18016  // This memory type is acceptable according to memoryTypeBits bitmask.
18017  if((memTypeBit & memoryTypeBits) != 0)
18018  {
18019  const VkMemoryPropertyFlags currFlags =
18020  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
18021  // This memory type contains requiredFlags.
18022  if((requiredFlags & ~currFlags) == 0)
18023  {
18024  // Calculate cost as number of bits from preferredFlags not present in this memory type.
18025  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
18026  VmaCountBitsSet(currFlags & notPreferredFlags);
18027  // Remember memory type with lowest cost.
18028  if(currCost < minCost)
18029  {
18030  *pMemoryTypeIndex = memTypeIndex;
18031  if(currCost == 0)
18032  {
18033  return VK_SUCCESS;
18034  }
18035  minCost = currCost;
18036  }
18037  }
18038  }
18039  }
18040  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
18041 }
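// Usage sketch (illustrative): querying a memory type for a GPU-only resource; the
// UINT32_MAX bitmask means "no restriction from VkMemoryRequirements" and would
// normally come from vkGet*MemoryRequirements via the helpers below.
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
*/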
18042 
18043 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
18044  VmaAllocator allocator,
18045  const VkBufferCreateInfo* pBufferCreateInfo,
18046  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18047  uint32_t* pMemoryTypeIndex)
18048 {
18049  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18050  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
18051  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18052  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18053 
18054  const VkDevice hDev = allocator->m_hDevice;
18055  VkBuffer hBuffer = VK_NULL_HANDLE;
18056  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
18057  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
18058  if(res == VK_SUCCESS)
18059  {
18060  VkMemoryRequirements memReq = {};
18061  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
18062  hDev, hBuffer, &memReq);
18063 
18064  res = vmaFindMemoryTypeIndex(
18065  allocator,
18066  memReq.memoryTypeBits,
18067  pAllocationCreateInfo,
18068  pMemoryTypeIndex);
18069 
18070  allocator->GetVulkanFunctions().vkDestroyBuffer(
18071  hDev, hBuffer, allocator->GetAllocationCallbacks());
18072  }
18073  return res;
18074 }
18075 
18076 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
18077  VmaAllocator allocator,
18078  const VkImageCreateInfo* pImageCreateInfo,
18079  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18080  uint32_t* pMemoryTypeIndex)
18081 {
18082  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18083  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
18084  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18085  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18086 
18087  const VkDevice hDev = allocator->m_hDevice;
18088  VkImage hImage = VK_NULL_HANDLE;
18089  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
18090  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
18091  if(res == VK_SUCCESS)
18092  {
18093  VkMemoryRequirements memReq = {};
18094  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
18095  hDev, hImage, &memReq);
18096 
18097  res = vmaFindMemoryTypeIndex(
18098  allocator,
18099  memReq.memoryTypeBits,
18100  pAllocationCreateInfo,
18101  pMemoryTypeIndex);
18102 
18103  allocator->GetVulkanFunctions().vkDestroyImage(
18104  hDev, hImage, allocator->GetAllocationCallbacks());
18105  }
18106  return res;
18107 }
18108 
18109 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
18110  VmaAllocator allocator,
18111  const VmaPoolCreateInfo* pCreateInfo,
18112  VmaPool* pPool)
18113 {
18114  VMA_ASSERT(allocator && pCreateInfo && pPool);
18115 
18116  VMA_DEBUG_LOG("vmaCreatePool");
18117 
18118  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18119 
18120  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
18121 
18122 #if VMA_RECORDING_ENABLED
18123  if(allocator->GetRecorder() != VMA_NULL)
18124  {
18125  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
18126  }
18127 #endif
18128 
18129  return res;
18130 }
18131 
18132 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
18133  VmaAllocator allocator,
18134  VmaPool pool)
18135 {
18136  VMA_ASSERT(allocator);
18137 
18138  if(pool == VK_NULL_HANDLE)
18139  {
18140  return;
18141  }
18142 
18143  VMA_DEBUG_LOG("vmaDestroyPool");
18144 
18145  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18146 
18147 #if VMA_RECORDING_ENABLED
18148  if(allocator->GetRecorder() != VMA_NULL)
18149  {
18150  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
18151  }
18152 #endif
18153 
18154  allocator->DestroyPool(pool);
18155 }
18156 
18157 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
18158  VmaAllocator allocator,
18159  VmaPool pool,
18160  VmaPoolStats* pPoolStats)
18161 {
18162  VMA_ASSERT(allocator && pool && pPoolStats);
18163 
18164  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18165 
18166  allocator->GetPoolStats(pool, pPoolStats);
18167 }
18168 
18169 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
18170  VmaAllocator allocator,
18171  VmaPool pool,
18172  size_t* pLostAllocationCount)
18173 {
18174  VMA_ASSERT(allocator && pool);
18175 
18176  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18177 
18178 #if VMA_RECORDING_ENABLED
18179  if(allocator->GetRecorder() != VMA_NULL)
18180  {
18181  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
18182  }
18183 #endif
18184 
18185  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18186 }
18187 
18188 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
18189 {
18190  VMA_ASSERT(allocator && pool);
18191 
18192  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18193 
18194  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
18195 
18196  return allocator->CheckPoolCorruption(pool);
18197 }
18198 
18199 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
18200  VmaAllocator allocator,
18201  VmaPool pool,
18202  const char** ppName)
18203 {
18204  VMA_ASSERT(allocator && pool && ppName);
18205 
18206  VMA_DEBUG_LOG("vmaGetPoolName");
18207 
18208  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18209 
18210  *ppName = pool->GetName();
18211 }
18212 
18213 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
18214  VmaAllocator allocator,
18215  VmaPool pool,
18216  const char* pName)
18217 {
18218  VMA_ASSERT(allocator && pool);
18219 
18220  VMA_DEBUG_LOG("vmaSetPoolName");
18221 
18222  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18223 
18224  pool->SetName(pName);
18225 
18226 #if VMA_RECORDING_ENABLED
18227  if(allocator->GetRecorder() != VMA_NULL)
18228  {
18229  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18230  }
18231 #endif
18232 }
18233 
18234 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18235  VmaAllocator allocator,
18236  const VkMemoryRequirements* pVkMemoryRequirements,
18237  const VmaAllocationCreateInfo* pCreateInfo,
18238  VmaAllocation* pAllocation,
18239  VmaAllocationInfo* pAllocationInfo)
18240 {
18241  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18242 
18243  VMA_DEBUG_LOG("vmaAllocateMemory");
18244 
18245  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18246 
18247  VkResult result = allocator->AllocateMemory(
18248  *pVkMemoryRequirements,
18249  false, // requiresDedicatedAllocation
18250  false, // prefersDedicatedAllocation
18251  VK_NULL_HANDLE, // dedicatedBuffer
18252  UINT32_MAX, // dedicatedBufferUsage
18253  VK_NULL_HANDLE, // dedicatedImage
18254  *pCreateInfo,
18255  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18256  1, // allocationCount
18257  pAllocation);
18258 
18259 #if VMA_RECORDING_ENABLED
18260  if(allocator->GetRecorder() != VMA_NULL)
18261  {
18262  allocator->GetRecorder()->RecordAllocateMemory(
18263  allocator->GetCurrentFrameIndex(),
18264  *pVkMemoryRequirements,
18265  *pCreateInfo,
18266  *pAllocation);
18267  }
18268 #endif
18269 
18270  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18271  {
18272  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18273  }
18274 
18275  return result;
18276 }
18277 
18278 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18279  VmaAllocator allocator,
18280  const VkMemoryRequirements* pVkMemoryRequirements,
18281  const VmaAllocationCreateInfo* pCreateInfo,
18282  size_t allocationCount,
18283  VmaAllocation* pAllocations,
18284  VmaAllocationInfo* pAllocationInfo)
18285 {
18286  if(allocationCount == 0)
18287  {
18288  return VK_SUCCESS;
18289  }
18290 
18291  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18292 
18293  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18294 
18295  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18296 
18297  VkResult result = allocator->AllocateMemory(
18298  *pVkMemoryRequirements,
18299  false, // requiresDedicatedAllocation
18300  false, // prefersDedicatedAllocation
18301  VK_NULL_HANDLE, // dedicatedBuffer
18302  UINT32_MAX, // dedicatedBufferUsage
18303  VK_NULL_HANDLE, // dedicatedImage
18304  *pCreateInfo,
18305  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18306  allocationCount,
18307  pAllocations);
18308 
18309 #if VMA_RECORDING_ENABLED
18310  if(allocator->GetRecorder() != VMA_NULL)
18311  {
18312  allocator->GetRecorder()->RecordAllocateMemoryPages(
18313  allocator->GetCurrentFrameIndex(),
18314  *pVkMemoryRequirements,
18315  *pCreateInfo,
18316  (uint64_t)allocationCount,
18317  pAllocations);
18318  }
18319 #endif
18320 
18321  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18322  {
18323  for(size_t i = 0; i < allocationCount; ++i)
18324  {
18325  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18326  }
18327  }
18328 
18329  return result;
18330 }
18331 
18332 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18333  VmaAllocator allocator,
18334  VkBuffer buffer,
18335  const VmaAllocationCreateInfo* pCreateInfo,
18336  VmaAllocation* pAllocation,
18337  VmaAllocationInfo* pAllocationInfo)
18338 {
18339  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18340 
18341  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18342 
18343  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18344 
18345  VkMemoryRequirements vkMemReq = {};
18346  bool requiresDedicatedAllocation = false;
18347  bool prefersDedicatedAllocation = false;
18348  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18349  requiresDedicatedAllocation,
18350  prefersDedicatedAllocation);
18351 
18352  VkResult result = allocator->AllocateMemory(
18353  vkMemReq,
18354  requiresDedicatedAllocation,
18355  prefersDedicatedAllocation,
18356  buffer, // dedicatedBuffer
18357  UINT32_MAX, // dedicatedBufferUsage
18358  VK_NULL_HANDLE, // dedicatedImage
18359  *pCreateInfo,
18360  VMA_SUBALLOCATION_TYPE_BUFFER,
18361  1, // allocationCount
18362  pAllocation);
18363 
18364 #if VMA_RECORDING_ENABLED
18365  if(allocator->GetRecorder() != VMA_NULL)
18366  {
18367  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18368  allocator->GetCurrentFrameIndex(),
18369  vkMemReq,
18370  requiresDedicatedAllocation,
18371  prefersDedicatedAllocation,
18372  *pCreateInfo,
18373  *pAllocation);
18374  }
18375 #endif
18376 
18377  if(pAllocationInfo && result == VK_SUCCESS)
18378  {
18379  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18380  }
18381 
18382  return result;
18383 }
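// Usage sketch (illustrative): pairing vmaAllocateMemoryForBuffer() with an
// application-created buffer and binding via vmaBindBufferMemory(); in practice
// vmaCreateBuffer() performs all three steps in one call. `device` and `allocator`
// are assumed valid.
/*
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VkBuffer buffer = VK_NULL_HANDLE;
vkCreateBuffer(device, &bufCreateInfo, nullptr, &buffer);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation allocation = VK_NULL_HANDLE;
vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, nullptr);
vmaBindBufferMemory(allocator, allocation, buffer);
*/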
18384 
18385 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18386  VmaAllocator allocator,
18387  VkImage image,
18388  const VmaAllocationCreateInfo* pCreateInfo,
18389  VmaAllocation* pAllocation,
18390  VmaAllocationInfo* pAllocationInfo)
18391 {
18392  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18393 
18394  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18395 
18396  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18397 
18398  VkMemoryRequirements vkMemReq = {};
18399  bool requiresDedicatedAllocation = false;
18400  bool prefersDedicatedAllocation = false;
18401  allocator->GetImageMemoryRequirements(image, vkMemReq,
18402  requiresDedicatedAllocation, prefersDedicatedAllocation);
18403 
18404  VkResult result = allocator->AllocateMemory(
18405  vkMemReq,
18406  requiresDedicatedAllocation,
18407  prefersDedicatedAllocation,
18408  VK_NULL_HANDLE, // dedicatedBuffer
18409  UINT32_MAX, // dedicatedBufferUsage
18410  image, // dedicatedImage
18411  *pCreateInfo,
18412  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18413  1, // allocationCount
18414  pAllocation);
18415 
18416 #if VMA_RECORDING_ENABLED
18417  if(allocator->GetRecorder() != VMA_NULL)
18418  {
18419  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18420  allocator->GetCurrentFrameIndex(),
18421  vkMemReq,
18422  requiresDedicatedAllocation,
18423  prefersDedicatedAllocation,
18424  *pCreateInfo,
18425  *pAllocation);
18426  }
18427 #endif
18428 
18429  if(pAllocationInfo && result == VK_SUCCESS)
18430  {
18431  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18432  }
18433 
18434  return result;
18435 }
18436 
18437 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18438  VmaAllocator allocator,
18439  VmaAllocation allocation)
18440 {
18441  VMA_ASSERT(allocator);
18442 
18443  if(allocation == VK_NULL_HANDLE)
18444  {
18445  return;
18446  }
18447 
18448  VMA_DEBUG_LOG("vmaFreeMemory");
18449 
18450  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18451 
18452 #if VMA_RECORDING_ENABLED
18453  if(allocator->GetRecorder() != VMA_NULL)
18454  {
18455  allocator->GetRecorder()->RecordFreeMemory(
18456  allocator->GetCurrentFrameIndex(),
18457  allocation);
18458  }
18459 #endif
18460 
18461  allocator->FreeMemory(
18462  1, // allocationCount
18463  &allocation);
18464 }
18465 
18466 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18467  VmaAllocator allocator,
18468  size_t allocationCount,
18469  const VmaAllocation* pAllocations)
18470 {
18471  if(allocationCount == 0)
18472  {
18473  return;
18474  }
18475 
18476  VMA_ASSERT(allocator);
18477 
18478  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18479 
18480  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18481 
18482 #if VMA_RECORDING_ENABLED
18483  if(allocator->GetRecorder() != VMA_NULL)
18484  {
18485  allocator->GetRecorder()->RecordFreeMemoryPages(
18486  allocator->GetCurrentFrameIndex(),
18487  (uint64_t)allocationCount,
18488  pAllocations);
18489  }
18490 #endif
18491 
18492  allocator->FreeMemory(allocationCount, pAllocations);
18493 }
18494 
18495 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
18496  VmaAllocator allocator,
18497  VmaAllocation allocation,
18498  VkDeviceSize newSize)
18499 {
18500  VMA_ASSERT(allocator && allocation);
18501 
18502  VMA_DEBUG_LOG("vmaResizeAllocation");
18503 
18504  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18505 
18506  return allocator->ResizeAllocation(allocation, newSize);
18507 }
18508 
18509 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18510  VmaAllocator allocator,
18511  VmaAllocation allocation,
18512  VmaAllocationInfo* pAllocationInfo)
18513 {
18514  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18515 
18516  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18517 
18518 #if VMA_RECORDING_ENABLED
18519  if(allocator->GetRecorder() != VMA_NULL)
18520  {
18521  allocator->GetRecorder()->RecordGetAllocationInfo(
18522  allocator->GetCurrentFrameIndex(),
18523  allocation);
18524  }
18525 #endif
18526 
18527  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18528 }
18529 
18530 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18531  VmaAllocator allocator,
18532  VmaAllocation allocation)
18533 {
18534  VMA_ASSERT(allocator && allocation);
18535 
18536  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18537 
18538 #if VMA_RECORDING_ENABLED
18539  if(allocator->GetRecorder() != VMA_NULL)
18540  {
18541  allocator->GetRecorder()->RecordTouchAllocation(
18542  allocator->GetCurrentFrameIndex(),
18543  allocation);
18544  }
18545 #endif
18546 
18547  return allocator->TouchAllocation(allocation);
18548 }
18549 
18550 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
18551  VmaAllocator allocator,
18552  VmaAllocation allocation,
18553  void* pUserData)
18554 {
18555  VMA_ASSERT(allocator && allocation);
18556 
18557  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18558 
18559  allocation->SetUserData(allocator, pUserData);
18560 
18561 #if VMA_RECORDING_ENABLED
18562  if(allocator->GetRecorder() != VMA_NULL)
18563  {
18564  allocator->GetRecorder()->RecordSetAllocationUserData(
18565  allocator->GetCurrentFrameIndex(),
18566  allocation,
18567  pUserData);
18568  }
18569 #endif
18570 }
18571 
18572 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
18573  VmaAllocator allocator,
18574  VmaAllocation* pAllocation)
18575 {
18576  VMA_ASSERT(allocator && pAllocation);
18577 
18578  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18579 
18580  allocator->CreateLostAllocation(pAllocation);
18581 
18582 #if VMA_RECORDING_ENABLED
18583  if(allocator->GetRecorder() != VMA_NULL)
18584  {
18585  allocator->GetRecorder()->RecordCreateLostAllocation(
18586  allocator->GetCurrentFrameIndex(),
18587  *pAllocation);
18588  }
18589 #endif
18590 }
18591 
18592 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
18593  VmaAllocator allocator,
18594  VmaAllocation allocation,
18595  void** ppData)
18596 {
18597  VMA_ASSERT(allocator && allocation && ppData);
18598 
18599  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18600 
18601  VkResult res = allocator->Map(allocation, ppData);
18602 
18603 #if VMA_RECORDING_ENABLED
18604  if(allocator->GetRecorder() != VMA_NULL)
18605  {
18606  allocator->GetRecorder()->RecordMapMemory(
18607  allocator->GetCurrentFrameIndex(),
18608  allocation);
18609  }
18610 #endif
18611 
18612  return res;
18613 }
18614 
18615 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
18616  VmaAllocator allocator,
18617  VmaAllocation allocation)
18618 {
18619  VMA_ASSERT(allocator && allocation);
18620 
18621  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18622 
18623 #if VMA_RECORDING_ENABLED
18624  if(allocator->GetRecorder() != VMA_NULL)
18625  {
18626  allocator->GetRecorder()->RecordUnmapMemory(
18627  allocator->GetCurrentFrameIndex(),
18628  allocation);
18629  }
18630 #endif
18631 
18632  allocator->Unmap(allocation);
18633 }
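
// A minimal usage sketch for the map/unmap pair (hypothetical names, assuming
// `alloc` lives in HOST_VISIBLE memory, e.g. VMA_MEMORY_USAGE_CPU_TO_GPU):
//
//     void* mappedData = VMA_NULL;
//     if(vmaMapMemory(allocator, alloc, &mappedData) == VK_SUCCESS)
//     {
//         memcpy(mappedData, srcData, (size_t)srcSize);
//         vmaUnmapMemory(allocator, alloc);
//     }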
18634 
18635 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18636 {
18637  VMA_ASSERT(allocator && allocation);
18638 
18639  VMA_DEBUG_LOG("vmaFlushAllocation");
18640 
18641  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18642 
18643  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
18644 
18645 #if VMA_RECORDING_ENABLED
18646  if(allocator->GetRecorder() != VMA_NULL)
18647  {
18648  allocator->GetRecorder()->RecordFlushAllocation(
18649  allocator->GetCurrentFrameIndex(),
18650  allocation, offset, size);
18651  }
18652 #endif
18653 
18654  return res;
18655 }
18656 
18657 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18658 {
18659  VMA_ASSERT(allocator && allocation);
18660 
18661  VMA_DEBUG_LOG("vmaInvalidateAllocation");
18662 
18663  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18664 
18665  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
18666 
18667 #if VMA_RECORDING_ENABLED
18668  if(allocator->GetRecorder() != VMA_NULL)
18669  {
18670  allocator->GetRecorder()->RecordInvalidateAllocation(
18671  allocator->GetCurrentFrameIndex(),
18672  allocation, offset, size);
18673  }
18674 #endif
18675 
18676  return res;
18677 }
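
// A sketch of when each cache operation applies, assuming `alloc` resides in
// HOST_VISIBLE memory that is not HOST_COHERENT (hypothetical names):
//
//     // After writing through a mapped pointer, make the write visible to the GPU:
//     vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
//     // Before reading data the GPU has written, refresh the CPU-side view:
//     vmaInvalidateAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);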
18678 
18679 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
18680  VmaAllocator allocator,
18681  uint32_t allocationCount,
18682  const VmaAllocation* allocations,
18683  const VkDeviceSize* offsets,
18684  const VkDeviceSize* sizes)
18685 {
18686  VMA_ASSERT(allocator);
18687 
18688  if(allocationCount == 0)
18689  {
18690  return VK_SUCCESS;
18691  }
18692 
18693  VMA_ASSERT(allocations);
18694 
18695  VMA_DEBUG_LOG("vmaFlushAllocations");
18696 
18697  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18698 
18699  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
18700 
18701 #if VMA_RECORDING_ENABLED
18702  if(allocator->GetRecorder() != VMA_NULL)
18703  {
18704  // TODO: Recording of vmaFlushAllocations is not implemented yet.
18705  }
18706 #endif
18707 
18708  return res;
18709 }
18710 
18711 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
18712  VmaAllocator allocator,
18713  uint32_t allocationCount,
18714  const VmaAllocation* allocations,
18715  const VkDeviceSize* offsets,
18716  const VkDeviceSize* sizes)
18717 {
18718  VMA_ASSERT(allocator);
18719 
18720  if(allocationCount == 0)
18721  {
18722  return VK_SUCCESS;
18723  }
18724 
18725  VMA_ASSERT(allocations);
18726 
18727  VMA_DEBUG_LOG("vmaInvalidateAllocations");
18728 
18729  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18730 
18731  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
18732 
18733 #if VMA_RECORDING_ENABLED
18734  if(allocator->GetRecorder() != VMA_NULL)
18735  {
18736  // TODO: Recording of vmaInvalidateAllocations is not implemented yet.
18737  }
18738 #endif
18739 
18740  return res;
18741 }
18742 
18743 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
18744 {
18745  VMA_ASSERT(allocator);
18746 
18747  VMA_DEBUG_LOG("vmaCheckCorruption");
18748 
18749  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18750 
18751  return allocator->CheckCorruption(memoryTypeBits);
18752 }
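
// A minimal sketch checking all memory types at once (corruption detection
// requires building with VMA_DEBUG_MARGIN and VMA_DEBUG_DETECT_CORRUPTION):
//
//     VkResult res = vmaCheckCorruption(allocator, UINT32_MAX);
//     // VK_SUCCESS - margins intact; VK_ERROR_VALIDATION_FAILED_EXT -
//     // corruption found; VK_ERROR_FEATURE_NOT_PRESENT - detection disabled.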
18753 
18754 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
18755  VmaAllocator allocator,
18756  const VmaAllocation* pAllocations,
18757  size_t allocationCount,
18758  VkBool32* pAllocationsChanged,
18759  const VmaDefragmentationInfo *pDefragmentationInfo,
18760  VmaDefragmentationStats* pDefragmentationStats)
18761 {
18762  // Deprecated interface, reimplemented using new one.
18763 
18764  VmaDefragmentationInfo2 info2 = {};
18765  info2.allocationCount = (uint32_t)allocationCount;
18766  info2.pAllocations = pAllocations;
18767  info2.pAllocationsChanged = pAllocationsChanged;
18768  if(pDefragmentationInfo != VMA_NULL)
18769  {
18770  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
18771  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
18772  }
18773  else
18774  {
18775  info2.maxCpuAllocationsToMove = UINT32_MAX;
18776  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18777  }
18778  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18779 
18780  VmaDefragmentationContext ctx = VK_NULL_HANDLE;
18781  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18782  if(res == VK_NOT_READY)
18783  {
18784  res = vmaDefragmentationEnd(allocator, ctx);
18785  }
18786  return res;
18787 }
18788 
18789 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
18790  VmaAllocator allocator,
18791  const VmaDefragmentationInfo2* pInfo,
18792  VmaDefragmentationStats* pStats,
18793  VmaDefragmentationContext *pContext)
18794 {
18795  VMA_ASSERT(allocator && pInfo && pContext);
18796 
18797  // Degenerate case: Nothing to defragment.
18798  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
18799  {
18800  return VK_SUCCESS;
18801  }
18802 
18803  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
18804  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
18805  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
18806  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18807 
18808  VMA_DEBUG_LOG("vmaDefragmentationBegin");
18809 
18810  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18811 
18812  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
18813 
18814 #if VMA_RECORDING_ENABLED
18815  if(allocator->GetRecorder() != VMA_NULL)
18816  {
18817  allocator->GetRecorder()->RecordDefragmentationBegin(
18818  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
18819  }
18820 #endif
18821 
18822  return res;
18823 }
18824 
18825 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
18826  VmaAllocator allocator,
18827  VmaDefragmentationContext context)
18828 {
18829  VMA_ASSERT(allocator);
18830 
18831  VMA_DEBUG_LOG("vmaDefragmentationEnd");
18832 
18833  if(context != VK_NULL_HANDLE)
18834  {
18835  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18836 
18837 #if VMA_RECORDING_ENABLED
18838  if(allocator->GetRecorder() != VMA_NULL)
18839  {
18840  allocator->GetRecorder()->RecordDefragmentationEnd(
18841  allocator->GetCurrentFrameIndex(), context);
18842  }
18843 #endif
18844 
18845  return allocator->DefragmentationEnd(context);
18846  }
18847  else
18848  {
18849  return VK_SUCCESS;
18850  }
18851 }
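
// A minimal CPU-side sketch of the begin/end pair, mirroring the deprecated
// vmaDefragment() wrapper above (hypothetical names; `allocs`/`allocCount`
// are the caller's candidate allocations):
//
//     VmaDefragmentationInfo2 defragInfo = {};
//     defragInfo.allocationCount = allocCount;
//     defragInfo.pAllocations = allocs;
//     defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
//     defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
//     VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
//     VmaDefragmentationStats defragStats = {};
//     VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &defragStats, &defragCtx);
//     if(res == VK_NOT_READY)
//     {
//         res = vmaDefragmentationEnd(allocator, defragCtx); // required after VK_NOT_READY
//     }
//     // Buffers/images bound to moved allocations must then be recreated and rebound.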
18852 
18853 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18854  VmaAllocator allocator,
18855  VmaDefragmentationContext context,
18856  VmaDefragmentationPassInfo* pInfo
18857  )
18858 {
18859  VMA_ASSERT(allocator);
18860  VMA_ASSERT(pInfo);
18861 
18862  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18863 
18864  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18865 
18866  if(context == VK_NULL_HANDLE)
18867  {
18868  pInfo->moveCount = 0;
18869  return VK_SUCCESS;
18870  }
18871 
18872  return allocator->DefragmentationPassBegin(pInfo, context);
18873 }
18874 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18875  VmaAllocator allocator,
18876  VmaDefragmentationContext context)
18877 {
18878  VMA_ASSERT(allocator);
18879 
18880  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18881  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18882 
18883  if(context == VK_NULL_HANDLE)
18884  return VK_SUCCESS;
18885 
18886  return allocator->DefragmentationPassEnd(context);
18887 }
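
// A loose sketch of one incremental pass (hypothetical names; assumes a
// `defragCtx` begun with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set in
// VmaDefragmentationInfo2::flags):
//
//     VmaDefragmentationPassMoveInfo moves[64];
//     VmaDefragmentationPassInfo passInfo = {};
//     passInfo.moveCount = 64; // capacity of `moves` - an assumption here
//     passInfo.pMoves = moves;
//     vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
//     // For each of the passInfo.moveCount entries: recreate the resource,
//     // bind it to moves[i].memory at moves[i].offset, and copy its contents.
//     vmaEndDefragmentationPass(allocator, defragCtx);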
18888 
18889 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18890  VmaAllocator allocator,
18891  VmaAllocation allocation,
18892  VkBuffer buffer)
18893 {
18894  VMA_ASSERT(allocator && allocation && buffer);
18895 
18896  VMA_DEBUG_LOG("vmaBindBufferMemory");
18897 
18898  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18899 
18900  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18901 }
18902 
18903 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18904  VmaAllocator allocator,
18905  VmaAllocation allocation,
18906  VkDeviceSize allocationLocalOffset,
18907  VkBuffer buffer,
18908  const void* pNext)
18909 {
18910  VMA_ASSERT(allocator && allocation && buffer);
18911 
18912  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18913 
18914  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18915 
18916  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18917 }
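
// A minimal sketch of binding at a non-zero offset inside the allocation
// (hypothetical names; the offset must satisfy the buffer's size and alignment
// requirements, and a non-null pNext needs VK_KHR_bind_memory2 or Vulkan 1.1):
//
//     VkResult res = vmaBindBufferMemory2(allocator, alloc, 256, buf, VMA_NULL);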
18918 
18919 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18920  VmaAllocator allocator,
18921  VmaAllocation allocation,
18922  VkImage image)
18923 {
18924  VMA_ASSERT(allocator && allocation && image);
18925 
18926  VMA_DEBUG_LOG("vmaBindImageMemory");
18927 
18928  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18929 
18930  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
18931 }
18932 
18933 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
18934  VmaAllocator allocator,
18935  VmaAllocation allocation,
18936  VkDeviceSize allocationLocalOffset,
18937  VkImage image,
18938  const void* pNext)
18939 {
18940  VMA_ASSERT(allocator && allocation && image);
18941 
18942  VMA_DEBUG_LOG("vmaBindImageMemory2");
18943 
18944  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18945 
18946  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18947 }
18948 
18949 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
18950  VmaAllocator allocator,
18951  const VkBufferCreateInfo* pBufferCreateInfo,
18952  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18953  VkBuffer* pBuffer,
18954  VmaAllocation* pAllocation,
18955  VmaAllocationInfo* pAllocationInfo)
18956 {
18957  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
18958 
18959  if(pBufferCreateInfo->size == 0)
18960  {
18961  return VK_ERROR_VALIDATION_FAILED_EXT;
18962  }
18963  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
18964  !allocator->m_UseKhrBufferDeviceAddress)
18965  {
18966  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
18967  return VK_ERROR_VALIDATION_FAILED_EXT;
18968  }
18969 
18970  VMA_DEBUG_LOG("vmaCreateBuffer");
18971 
18972  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18973 
18974  *pBuffer = VK_NULL_HANDLE;
18975  *pAllocation = VK_NULL_HANDLE;
18976 
18977  // 1. Create VkBuffer.
18978  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
18979  allocator->m_hDevice,
18980  pBufferCreateInfo,
18981  allocator->GetAllocationCallbacks(),
18982  pBuffer);
18983  if(res >= 0)
18984  {
18985  // 2. vkGetBufferMemoryRequirements.
18986  VkMemoryRequirements vkMemReq = {};
18987  bool requiresDedicatedAllocation = false;
18988  bool prefersDedicatedAllocation = false;
18989  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
18990  requiresDedicatedAllocation, prefersDedicatedAllocation);
18991 
18992  // 3. Allocate memory using allocator.
18993  res = allocator->AllocateMemory(
18994  vkMemReq,
18995  requiresDedicatedAllocation,
18996  prefersDedicatedAllocation,
18997  *pBuffer, // dedicatedBuffer
18998  pBufferCreateInfo->usage, // dedicatedBufferUsage
18999  VK_NULL_HANDLE, // dedicatedImage
19000  *pAllocationCreateInfo,
19001  VMA_SUBALLOCATION_TYPE_BUFFER,
19002  1, // allocationCount
19003  pAllocation);
19004 
19005 #if VMA_RECORDING_ENABLED
19006  if(allocator->GetRecorder() != VMA_NULL)
19007  {
19008  allocator->GetRecorder()->RecordCreateBuffer(
19009  allocator->GetCurrentFrameIndex(),
19010  *pBufferCreateInfo,
19011  *pAllocationCreateInfo,
19012  *pAllocation);
19013  }
19014 #endif
19015 
19016  if(res >= 0)
19017  {
19018  // 4. Bind buffer with memory.
19019  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19020  {
19021  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
19022  }
19023  if(res >= 0)
19024  {
19025  // All steps succeeded.
19026  #if VMA_STATS_STRING_ENABLED
19027  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19028  #endif
19029  if(pAllocationInfo != VMA_NULL)
19030  {
19031  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19032  }
19033 
19034  return VK_SUCCESS;
19035  }
19036  allocator->FreeMemory(
19037  1, // allocationCount
19038  pAllocation);
19039  *pAllocation = VK_NULL_HANDLE;
19040  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19041  *pBuffer = VK_NULL_HANDLE;
19042  return res;
19043  }
19044  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19045  *pBuffer = VK_NULL_HANDLE;
19046  return res;
19047  }
19048  return res;
19049 }
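
// A minimal usage sketch (hypothetical names, assuming a valid VmaAllocator
// `allocator`):
//
//     VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
//     bufCreateInfo.size = 65536;
//     bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     VkBuffer buf;
//     VmaAllocation alloc;
//     VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
//         &buf, &alloc, VMA_NULL);
//     // ... use the buffer, then:
//     vmaDestroyBuffer(allocator, buf, alloc);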
19050 
19051 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
19052  VmaAllocator allocator,
19053  VkBuffer buffer,
19054  VmaAllocation allocation)
19055 {
19056  VMA_ASSERT(allocator);
19057 
19058  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19059  {
19060  return;
19061  }
19062 
19063  VMA_DEBUG_LOG("vmaDestroyBuffer");
19064 
19065  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19066 
19067 #if VMA_RECORDING_ENABLED
19068  if(allocator->GetRecorder() != VMA_NULL)
19069  {
19070  allocator->GetRecorder()->RecordDestroyBuffer(
19071  allocator->GetCurrentFrameIndex(),
19072  allocation);
19073  }
19074 #endif
19075 
19076  if(buffer != VK_NULL_HANDLE)
19077  {
19078  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
19079  }
19080 
19081  if(allocation != VK_NULL_HANDLE)
19082  {
19083  allocator->FreeMemory(
19084  1, // allocationCount
19085  &allocation);
19086  }
19087 }
19088 
19089 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
19090  VmaAllocator allocator,
19091  const VkImageCreateInfo* pImageCreateInfo,
19092  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19093  VkImage* pImage,
19094  VmaAllocation* pAllocation,
19095  VmaAllocationInfo* pAllocationInfo)
19096 {
19097  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
19098 
19099  if(pImageCreateInfo->extent.width == 0 ||
19100  pImageCreateInfo->extent.height == 0 ||
19101  pImageCreateInfo->extent.depth == 0 ||
19102  pImageCreateInfo->mipLevels == 0 ||
19103  pImageCreateInfo->arrayLayers == 0)
19104  {
19105  return VK_ERROR_VALIDATION_FAILED_EXT;
19106  }
19107 
19108  VMA_DEBUG_LOG("vmaCreateImage");
19109 
19110  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19111 
19112  *pImage = VK_NULL_HANDLE;
19113  *pAllocation = VK_NULL_HANDLE;
19114 
19115  // 1. Create VkImage.
19116  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
19117  allocator->m_hDevice,
19118  pImageCreateInfo,
19119  allocator->GetAllocationCallbacks(),
19120  pImage);
19121  if(res >= 0)
19122  {
19123  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
19124  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
19125  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
19126 
19127  // 2. Allocate memory using allocator.
19128  VkMemoryRequirements vkMemReq = {};
19129  bool requiresDedicatedAllocation = false;
19130  bool prefersDedicatedAllocation = false;
19131  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
19132  requiresDedicatedAllocation, prefersDedicatedAllocation);
19133 
19134  res = allocator->AllocateMemory(
19135  vkMemReq,
19136  requiresDedicatedAllocation,
19137  prefersDedicatedAllocation,
19138  VK_NULL_HANDLE, // dedicatedBuffer
19139  UINT32_MAX, // dedicatedBufferUsage
19140  *pImage, // dedicatedImage
19141  *pAllocationCreateInfo,
19142  suballocType,
19143  1, // allocationCount
19144  pAllocation);
19145 
19146 #if VMA_RECORDING_ENABLED
19147  if(allocator->GetRecorder() != VMA_NULL)
19148  {
19149  allocator->GetRecorder()->RecordCreateImage(
19150  allocator->GetCurrentFrameIndex(),
19151  *pImageCreateInfo,
19152  *pAllocationCreateInfo,
19153  *pAllocation);
19154  }
19155 #endif
19156 
19157  if(res >= 0)
19158  {
19159  // 3. Bind image with memory.
19160  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19161  {
19162  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
19163  }
19164  if(res >= 0)
19165  {
19166  // All steps succeeded.
19167  #if VMA_STATS_STRING_ENABLED
19168  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
19169  #endif
19170  if(pAllocationInfo != VMA_NULL)
19171  {
19172  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19173  }
19174 
19175  return VK_SUCCESS;
19176  }
19177  allocator->FreeMemory(
19178  1, // allocationCount
19179  pAllocation);
19180  *pAllocation = VK_NULL_HANDLE;
19181  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19182  *pImage = VK_NULL_HANDLE;
19183  return res;
19184  }
19185  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19186  *pImage = VK_NULL_HANDLE;
19187  return res;
19188  }
19189  return res;
19190 }
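
// A minimal usage sketch (hypothetical names, assuming a valid VmaAllocator
// `allocator`):
//
//     VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
//     imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
//     imgCreateInfo.extent.width = 1024;
//     imgCreateInfo.extent.height = 1024;
//     imgCreateInfo.extent.depth = 1;
//     imgCreateInfo.mipLevels = 1;
//     imgCreateInfo.arrayLayers = 1;
//     imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
//     imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
//     imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//     imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
//     VmaAllocationCreateInfo allocCreateInfo = {};
//     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
//     VkImage img;
//     VmaAllocation alloc;
//     VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
//         &img, &alloc, VMA_NULL);
//     // ... use the image, then:
//     vmaDestroyImage(allocator, img, alloc);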
19191 
19192 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
19193  VmaAllocator allocator,
19194  VkImage image,
19195  VmaAllocation allocation)
19196 {
19197  VMA_ASSERT(allocator);
19198 
19199  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19200  {
19201  return;
19202  }
19203 
19204  VMA_DEBUG_LOG("vmaDestroyImage");
19205 
19206  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19207 
19208 #if VMA_RECORDING_ENABLED
19209  if(allocator->GetRecorder() != VMA_NULL)
19210  {
19211  allocator->GetRecorder()->RecordDestroyImage(
19212  allocator->GetCurrentFrameIndex(),
19213  allocation);
19214  }
19215 #endif
19216 
19217  if(image != VK_NULL_HANDLE)
19218  {
19219  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
19220  }
19221  if(allocation != VK_NULL_HANDLE)
19222  {
19223  allocator->FreeMemory(
19224  1, // allocationCount
19225  &allocation);
19226  }
19227 }
19228 
19229 #endif // #ifdef VMA_IMPLEMENTATION