Vulkan Memory Allocator
vk_mem_alloc.h
//
// Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//

#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
#define AMD_VULKAN_MEMORY_ALLOCATOR_H

#ifdef __cplusplus
extern "C" {
#endif

/*
Define this macro to 0/1 to disable/enable support for recording functionality,
available through VmaAllocatorCreateInfo::pRecordSettings.
*/
#ifndef VMA_RECORDING_ENABLED
    #define VMA_RECORDING_ENABLED 0
#endif

#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
    #define NOMINMAX // For windows.h
#endif

#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
    extern PFN_vkAllocateMemory vkAllocateMemory;
    extern PFN_vkFreeMemory vkFreeMemory;
    extern PFN_vkMapMemory vkMapMemory;
    extern PFN_vkUnmapMemory vkUnmapMemory;
    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
    extern PFN_vkBindBufferMemory vkBindBufferMemory;
    extern PFN_vkBindImageMemory vkBindImageMemory;
    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
    extern PFN_vkCreateBuffer vkCreateBuffer;
    extern PFN_vkDestroyBuffer vkDestroyBuffer;
    extern PFN_vkCreateImage vkCreateImage;
    extern PFN_vkDestroyImage vkDestroyImage;
    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
    #if VMA_VULKAN_VERSION >= 1001000
        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
    #endif // #if VMA_VULKAN_VERSION >= 1001000
#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES

#ifndef VULKAN_H_
    #include <vulkan/vulkan.h>
#endif

// Define this macro to declare the maximum supported Vulkan version in format AAABBBCCC,
// where AAA = major, BBB = minor, CCC = patch.
// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
#if !defined(VMA_VULKAN_VERSION)
    #if defined(VK_VERSION_1_2)
        #define VMA_VULKAN_VERSION 1002000
    #elif defined(VK_VERSION_1_1)
        #define VMA_VULKAN_VERSION 1001000
    #else
        #define VMA_VULKAN_VERSION 1000000
    #endif
#endif

#if !defined(VMA_DEDICATED_ALLOCATION)
    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
        #define VMA_DEDICATED_ALLOCATION 1
    #else
        #define VMA_DEDICATED_ALLOCATION 0
    #endif
#endif

#if !defined(VMA_BIND_MEMORY2)
    #if VK_KHR_bind_memory2
        #define VMA_BIND_MEMORY2 1
    #else
        #define VMA_BIND_MEMORY2 0
    #endif
#endif

#if !defined(VMA_MEMORY_BUDGET)
    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
        #define VMA_MEMORY_BUDGET 1
    #else
        #define VMA_MEMORY_BUDGET 0
    #endif
#endif

// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
        #define VMA_BUFFER_DEVICE_ADDRESS 1
    #else
        #define VMA_BUFFER_DEVICE_ADDRESS 0
    #endif
#endif

// Define these macros to decorate all public functions with additional code,
// before and after the returned type, respectively. This may be useful for
// exporting the functions when compiling VMA as a separate library. Example:
// #define VMA_CALL_PRE __declspec(dllexport)
// #define VMA_CALL_POST __cdecl
#ifndef VMA_CALL_PRE
    #define VMA_CALL_PRE
#endif
#ifndef VMA_CALL_POST
    #define VMA_CALL_POST
#endif

// Define this macro to decorate pointers with an attribute specifying the
// length of the array they point to if they are not null.
//
// The length may be one of:
// - The name of another parameter in the argument list where the pointer is declared
// - The name of another member in the struct where the pointer is declared
// - The name of a member of a struct type, meaning the value of that member in
//   the context of the call. For example,
//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount")
//   means the number of memory heaps available in the device associated
//   with the VmaAllocator being dealt with.
#ifndef VMA_LEN_IF_NOT_NULL
    #define VMA_LEN_IF_NOT_NULL(len)
#endif

// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nullable
#ifndef VMA_NULLABLE
    #ifdef __clang__
        #define VMA_NULLABLE _Nullable
    #else
        #define VMA_NULLABLE
    #endif
#endif

// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
// See: https://clang.llvm.org/docs/AttributeReference.html#nonnull
#ifndef VMA_NOT_NULL
    #ifdef __clang__
        #define VMA_NOT_NULL _Nonnull
    #else
        #define VMA_NOT_NULL
    #endif
#endif

// If non-dispatchable handles are represented as pointers, then we can give
// them nullability annotations.
#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
    #else
        #define VMA_NOT_NULL_NON_DISPATCHABLE
    #endif
#endif

#ifndef VMA_NULLABLE_NON_DISPATCHABLE
    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
    #else
        #define VMA_NULLABLE_NON_DISPATCHABLE
    #endif
#endif

VK_DEFINE_HANDLE(VmaAllocator)

typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);
typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryType,
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
    VkDeviceSize size,
    void* VMA_NULLABLE pUserData);

typedef struct VmaDeviceMemoryCallbacks {
    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
    void* VMA_NULLABLE pUserData;
} VmaDeviceMemoryCallbacks;

typedef VkFlags VmaAllocatorCreateFlags;

typedef struct VmaVulkanFunctions {
    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
#endif
#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
#endif
#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
#endif
} VmaVulkanFunctions;

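/*
Example (a minimal sketch, not part of the library): filling VmaVulkanFunctions
manually, assuming statically linked Vulkan prototypes are available. With
VK_NO_PROTOTYPES you would instead assign pointers obtained from
vkGetInstanceProcAddr/vkGetDeviceProcAddr.

    VmaVulkanFunctions vulkanFunctions = {};
    vulkanFunctions.vkGetPhysicalDeviceProperties = &vkGetPhysicalDeviceProperties;
    vulkanFunctions.vkGetPhysicalDeviceMemoryProperties = &vkGetPhysicalDeviceMemoryProperties;
    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
    vulkanFunctions.vkFreeMemory = &vkFreeMemory;
    // ... fill the remaining members the same way ...
    // Then pass it via VmaAllocatorCreateInfo::pVulkanFunctions.
*/
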
typedef enum VmaRecordFlagBits {
    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,

    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaRecordFlagBits;
typedef VkFlags VmaRecordFlags;

typedef struct VmaRecordSettings
{
    VmaRecordFlags flags;
    const char* VMA_NOT_NULL pFilePath;
} VmaRecordSettings;

typedef struct VmaAllocatorCreateInfo
{
    VmaAllocatorCreateFlags flags;
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    VkDevice VMA_NOT_NULL device;
    VkDeviceSize preferredLargeHeapBlockSize;
    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
    uint32_t frameInUseCount;
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
    const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
    VkInstance VMA_NOT_NULL instance;
    uint32_t vulkanApiVersion;
} VmaAllocatorCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
    VmaAllocator VMA_NULLABLE allocator);

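/*
Example (a minimal sketch): typical allocator lifetime. `instance`,
`physicalDevice` and `device` are assumed to be valid handles created by the
application beforehand.

    VmaAllocatorCreateInfo allocatorInfo = {};
    allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_0;
    allocatorInfo.physicalDevice = physicalDevice;
    allocatorInfo.device = device;
    allocatorInfo.instance = instance;

    VmaAllocator allocator;
    VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
    // ... use the allocator ...
    vmaDestroyAllocator(allocator);
*/
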
typedef struct VmaAllocatorInfo
{
    VkInstance VMA_NOT_NULL instance;
    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
    VkDevice VMA_NOT_NULL device;
} VmaAllocatorInfo;

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);

VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeIndex,
    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);

VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t frameIndex);

typedef struct VmaStatInfo
{
    uint32_t blockCount;
    uint32_t allocationCount;
    uint32_t unusedRangeCount;
    VkDeviceSize usedBytes;
    VkDeviceSize unusedBytes;
    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
} VmaStatInfo;

typedef struct VmaStats
{
    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
    VmaStatInfo total;
} VmaStats;

VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaStats* VMA_NOT_NULL pStats);

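/*
Example (a sketch): aggregating statistics. This call traverses all internal
data structures, so it is intended for debugging rather than per-frame use.

    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu B, unused: %llu B in %u blocks\n",
        (unsigned long long)stats.total.usedBytes,
        (unsigned long long)stats.total.unusedBytes,
        stats.total.blockCount);
*/
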
typedef struct VmaBudget
{
    VkDeviceSize blockBytes;
    VkDeviceSize allocationBytes;
    VkDeviceSize usage;
    VkDeviceSize budget;
} VmaBudget;

VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaBudget* VMA_NOT_NULL pBudget);

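/*
Example (a sketch): querying the current budget. pBudget is assumed to point
to an array with one element per memory heap; sizing it to
VK_MAX_MEMORY_HEAPS is always sufficient.

    VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
    vmaGetBudget(allocator, budgets);
    // Comparing budgets[heapIndex].usage against budgets[heapIndex].budget
    // tells how close heap heapIndex is to being over-committed.
*/
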
#ifndef VMA_STATS_STRING_ENABLED
#define VMA_STATS_STRING_ENABLED 1
#endif

#if VMA_STATS_STRING_ENABLED

VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
    VkBool32 detailedMap);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
    VmaAllocator VMA_NOT_NULL allocator,
    char* VMA_NULLABLE pStatsString);

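/*
Example (a sketch): dumping the full JSON report, then releasing it with the
matching free function.

    char* statsString = NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
    // ... write statsString to a log or file ...
    vmaFreeStatsString(allocator, statsString);
*/
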
#endif // #if VMA_STATS_STRING_ENABLED

VK_DEFINE_HANDLE(VmaPool)

typedef enum VmaMemoryUsage
{
    VMA_MEMORY_USAGE_UNKNOWN = 0,
    VMA_MEMORY_USAGE_GPU_ONLY = 1,
    VMA_MEMORY_USAGE_CPU_ONLY = 2,
    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
    VMA_MEMORY_USAGE_CPU_COPY = 5,
    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,

    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
} VmaMemoryUsage;

typedef VkFlags VmaAllocationCreateFlags;

typedef struct VmaAllocationCreateInfo
{
    VmaAllocationCreateFlags flags;
    VmaMemoryUsage usage;
    VkMemoryPropertyFlags requiredFlags;
    VkMemoryPropertyFlags preferredFlags;
    uint32_t memoryTypeBits;
    VmaPool VMA_NULLABLE pool;
    void* VMA_NULLABLE pUserData;
} VmaAllocationCreateInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t memoryTypeBits,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);

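/*
Example (a sketch): choosing a memory type for a staging buffer up front,
e.g. before creating a custom pool.

    VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    stagingBufInfo.size = 65536;
    stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

    uint32_t memTypeIndex;
    VkResult res = vmaFindMemoryTypeIndexForBufferInfo(
        allocator, &stagingBufInfo, &allocCreateInfo, &memTypeIndex);
*/
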
typedef enum VmaPoolCreateFlagBits {
    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,

    VMA_POOL_CREATE_ALGORITHM_MASK =
        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,

    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaPoolCreateFlagBits;
typedef VkFlags VmaPoolCreateFlags;

typedef struct VmaPoolCreateInfo {
    uint32_t memoryTypeIndex;
    VmaPoolCreateFlags flags;
    VkDeviceSize blockSize;
    size_t minBlockCount;
    size_t maxBlockCount;
    uint32_t frameInUseCount;
} VmaPoolCreateInfo;

typedef struct VmaPoolStats {
    VkDeviceSize size;
    VkDeviceSize unusedSize;
    size_t allocationCount;
    size_t unusedRangeCount;
    VkDeviceSize unusedRangeSizeMax;
    size_t blockCount;
} VmaPoolStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NULLABLE pool);

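/*
Example (a sketch): a custom pool with a fixed number of fixed-size blocks.
`memTypeIndex` would come from one of the vmaFindMemoryTypeIndex* functions
above.

    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex;
    poolCreateInfo.blockSize = 128ull * 1024 * 1024; // 128 MiB per block.
    poolCreateInfo.maxBlockCount = 2;

    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
    // Allocations then opt in via VmaAllocationCreateInfo::pool.
    // ...
    vmaDestroyPool(allocator, pool);
*/
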
VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    VmaPoolStats* VMA_NOT_NULL pPoolStats);

VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    size_t* VMA_NULLABLE pLostAllocationCount);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool);

VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE * VMA_NOT_NULL ppName);

VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaPool VMA_NOT_NULL pool,
    const char* VMA_NULLABLE pName);

VK_DEFINE_HANDLE(VmaAllocation)

typedef struct VmaAllocationInfo {
    uint32_t memoryType;
    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
    VkDeviceSize offset;
    VkDeviceSize size;
    void* VMA_NULLABLE pMappedData;
    void* VMA_NULLABLE pUserData;
} VmaAllocationInfo;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
    size_t allocationCount,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NULLABLE allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
    VmaAllocator VMA_NOT_NULL allocator,
    size_t allocationCount,
    const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize newSize);

VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);

VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE pUserData);

VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    void* VMA_NULLABLE * VMA_NOT_NULL ppData);

VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation);

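/*
Example (a sketch): uploading data through a temporary mapping. `alloc` is
assumed to be a host-visible allocation. The flush (declared below) is a
no-op on HOST_COHERENT memory but required for correctness elsewhere.

    void* mappedData;
    VkResult res = vmaMapMemory(allocator, alloc, &mappedData);
    if(res == VK_SUCCESS)
    {
        memcpy(mappedData, srcData, srcDataSize);
        vmaUnmapMemory(allocator, alloc);
        vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
    }
*/
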
VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize offset,
    VkDeviceSize size);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
    VmaAllocator VMA_NOT_NULL allocator,
    uint32_t allocationCount,
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);

VK_DEFINE_HANDLE(VmaDefragmentationContext)

typedef enum VmaDefragmentationFlagBits {
    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
} VmaDefragmentationFlagBits;
typedef VkFlags VmaDefragmentationFlags;

typedef struct VmaDefragmentationInfo2 {
    VmaDefragmentationFlags flags;
    uint32_t allocationCount;
    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
    uint32_t poolCount;
    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
    VkDeviceSize maxCpuBytesToMove;
    uint32_t maxCpuAllocationsToMove;
    VkDeviceSize maxGpuBytesToMove;
    uint32_t maxGpuAllocationsToMove;
    VkCommandBuffer VMA_NULLABLE commandBuffer;
} VmaDefragmentationInfo2;

typedef struct VmaDefragmentationPassMoveInfo {
    VmaAllocation VMA_NOT_NULL allocation;
    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
    VkDeviceSize offset;
} VmaDefragmentationPassMoveInfo;

typedef struct VmaDefragmentationPassInfo {
    uint32_t moveCount;
    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
} VmaDefragmentationPassInfo;

typedef struct VmaDefragmentationInfo {
    VkDeviceSize maxBytesToMove;
    uint32_t maxAllocationsToMove;
} VmaDefragmentationInfo;

typedef struct VmaDefragmentationStats {
    VkDeviceSize bytesMoved;
    VkDeviceSize bytesFreed;
    uint32_t allocationsMoved;
    uint32_t deviceMemoryBlocksFreed;
} VmaDefragmentationStats;

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
    VmaDefragmentationStats* VMA_NULLABLE pStats,
    VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context,
    VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
);
VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaDefragmentationContext VMA_NULLABLE context
);

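/*
Example (a sketch): CPU-side defragmentation of a set of allocations.
`allocations`/`allocCount` are assumed to be collected by the application;
buffers or images bound to allocations that get moved must be destroyed
before and recreated (and rebound) afterwards.

    VmaDefragmentationInfo2 defragInfo = {};
    defragInfo.allocationCount = allocCount;
    defragInfo.pAllocations = allocations;
    defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
    defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

    VmaDefragmentationContext defragCtx;
    vmaDefragmentationBegin(allocator, &defragInfo, NULL, &defragCtx);
    vmaDefragmentationEnd(allocator, defragCtx);
    // Then recreate resources for allocations whose memory moved.
*/
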
VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
    VmaAllocator VMA_NOT_NULL allocator,
    const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
    size_t allocationCount,
    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
    const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
    VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
    VmaAllocator VMA_NOT_NULL allocator,
    VmaAllocation VMA_NOT_NULL allocation,
    VkDeviceSize allocationLocalOffset,
    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
    const void* VMA_NULLABLE pNext);

VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
    VmaAllocator VMA_NOT_NULL allocator,
    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
    VmaAllocation VMA_NULLABLE allocation);

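/*
Example (a minimal sketch): the most common path - creating a buffer together
with its memory in one call.

    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 65536;
    bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkBuffer buf;
    VmaAllocation alloc;
    VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, NULL);
    // ...
    vmaDestroyBuffer(allocator, buf, alloc);
*/
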
VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
    VmaAllocator VMA_NOT_NULL allocator,
    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);

VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
    VmaAllocator VMA_NOT_NULL allocator,
    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
    VmaAllocation VMA_NULLABLE allocation);

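/*
Example (a sketch): an image follows the same pattern.

    VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
    imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
    imgCreateInfo.extent.width = 1024;
    imgCreateInfo.extent.height = 1024;
    imgCreateInfo.extent.depth = 1;
    imgCreateInfo.mipLevels = 1;
    imgCreateInfo.arrayLayers = 1;
    imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
    imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
    imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

    VkImage img;
    VmaAllocation alloc;
    VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, NULL);
    // ...
    vmaDestroyImage(allocator, img, alloc);
*/
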
#ifdef __cplusplus
}
#endif

#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H

// For Visual Studio IntelliSense.
#if defined(__cplusplus) && defined(__INTELLISENSE__)
#define VMA_IMPLEMENTATION
#endif

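/*
Example (a sketch): the intended way to compile the implementation - in
exactly one .cpp file, define the macro before including this header; every
other file includes the header alone.

    // vma_usage.cpp (hypothetical file name)
    #define VMA_IMPLEMENTATION
    #include "vk_mem_alloc.h"
*/
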
#ifdef VMA_IMPLEMENTATION
#undef VMA_IMPLEMENTATION

#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <utility>

#if VMA_RECORDING_ENABLED
    #include <chrono>
    #if defined(_WIN32)
        #include <windows.h>
    #else
        #include <sstream>
        #include <thread>
    #endif
#endif

/*******************************************************************************
CONFIGURATION SECTION

Define some of these macros before each #include of this header, or change them
here if you need behavior other than the default, depending on your environment.
*/

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
*/
#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
    #define VMA_STATIC_VULKAN_FUNCTIONS 1
#endif

/*
Define this macro to 1 to make the library fetch pointers to Vulkan functions
internally, like:

    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
*/
#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
    #if defined(VK_NO_PROTOTYPES)
        extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
        extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
    #endif
#endif

// Define this macro to 1 to make the library use STL containers instead of its own implementation.
//#define VMA_USE_STL_CONTAINERS 1

/* Set this macro to 1 to make the library include and use STL containers:
std::pair, std::vector, std::list, std::unordered_map.

Set it to 0 or leave it undefined to make the library use its own
implementation of the containers.
*/
#if VMA_USE_STL_CONTAINERS
    #define VMA_USE_STL_VECTOR 1
    #define VMA_USE_STL_UNORDERED_MAP 1
    #define VMA_USE_STL_LIST 1
#endif

#ifndef VMA_USE_STL_SHARED_MUTEX
    // Compiler conforms to C++17.
    #if __cplusplus >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    // Visual Studio defines __cplusplus properly only when passed the additional parameter /Zc:__cplusplus.
    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
        #define VMA_USE_STL_SHARED_MUTEX 1
    #else
        #define VMA_USE_STL_SHARED_MUTEX 0
    #endif
#endif

/*
THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
Library has its own container implementation.
*/
#if VMA_USE_STL_VECTOR
    #include <vector>
#endif

#if VMA_USE_STL_UNORDERED_MAP
    #include <unordered_map>
#endif

#if VMA_USE_STL_LIST
    #include <list>
#endif

/*
Following headers are used in this CONFIGURATION section only, so feel free to
remove them if not needed.
*/
#include <cassert> // for assert
#include <algorithm> // for min, max
#include <mutex>

#ifndef VMA_NULL
    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
    #define VMA_NULL nullptr
#endif

#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
#include <cstdlib>
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    return memalign(alignment, size);
}
#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
#include <cstdlib>

#if defined(__APPLE__)
#include <AvailabilityMacros.h>
#endif

static void* vma_aligned_alloc(size_t alignment, size_t size)
{
#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
    // MAC_OS_X_VERSION_10_16), even though the function is marked
    // available for 10.15. That's why the preprocessor checks for 10.16 but
    // the __builtin_available checks for 10.15.
    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
    if (__builtin_available(macOS 10.15, iOS 13, *))
        return aligned_alloc(alignment, size);
#endif
#endif
    // alignment must be >= sizeof(void*)
    if(alignment < sizeof(void*))
    {
        alignment = sizeof(void*);
    }

    void *pointer;
    if(posix_memalign(&pointer, alignment, size) == 0)
        return pointer;
    return VMA_NULL;
}
#elif defined(_WIN32)
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return _aligned_malloc(size, alignment);
}
#else
static void* vma_aligned_alloc(size_t alignment, size_t size)
{
    return aligned_alloc(alignment, size);
}
#endif

#if defined(_WIN32)
static void vma_aligned_free(void* ptr)
{
    _aligned_free(ptr);
}
#else
static void vma_aligned_free(void* ptr)
{
    free(ptr);
}
#endif

// If your compiler is not compatible with C++11 and the definition of the
// aligned_alloc() function is missing, uncommenting the following line may help:

//#include <malloc.h>

// Normal assert to check for programmer's errors, especially in Debug configuration.
#ifndef VMA_ASSERT
    #ifdef NDEBUG
        #define VMA_ASSERT(expr)
    #else
        #define VMA_ASSERT(expr) assert(expr)
    #endif
#endif

// Assert that will be called very often, like inside data structures e.g. operator[].
// Making it non-empty can make program slow.
#ifndef VMA_HEAVY_ASSERT
    #ifdef NDEBUG
        #define VMA_HEAVY_ASSERT(expr)
    #else
        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
    #endif
#endif

#ifndef VMA_ALIGN_OF
    #define VMA_ALIGN_OF(type) (__alignof(type))
#endif

#ifndef VMA_SYSTEM_ALIGNED_MALLOC
    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
#endif

#ifndef VMA_SYSTEM_ALIGNED_FREE
    // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
    #if defined(VMA_SYSTEM_FREE)
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
    #else
        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
    #endif
#endif

#ifndef VMA_MIN
    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
#endif

#ifndef VMA_MAX
    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
#endif

#ifndef VMA_SWAP
    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
#endif

#ifndef VMA_SORT
    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
#endif

#ifndef VMA_DEBUG_LOG
    #define VMA_DEBUG_LOG(format, ...)
    /*
    #define VMA_DEBUG_LOG(format, ...) do { \
        printf(format, __VA_ARGS__); \
        printf("\n"); \
    } while(false)
    */
#endif

// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
#if VMA_STATS_STRING_ENABLED
    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
    {
        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
    }
    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
    {
        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
    }
    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
    {
        snprintf(outStr, strLen, "%p", ptr);
    }
#endif

#ifndef VMA_MUTEX
    class VmaMutex
    {
    public:
        void Lock() { m_Mutex.lock(); }
        void Unlock() { m_Mutex.unlock(); }
        bool TryLock() { return m_Mutex.try_lock(); }
    private:
        std::mutex m_Mutex;
    };
    #define VMA_MUTEX VmaMutex
#endif

// Read-write mutex, where "read" is shared access, "write" is exclusive access.
#ifndef VMA_RW_MUTEX
    #if VMA_USE_STL_SHARED_MUTEX
        // Use std::shared_mutex from C++17.
        #include <shared_mutex>
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.lock_shared(); }
            void UnlockRead() { m_Mutex.unlock_shared(); }
            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
            void LockWrite() { m_Mutex.lock(); }
            void UnlockWrite() { m_Mutex.unlock(); }
            bool TryLockWrite() { return m_Mutex.try_lock(); }
        private:
            std::shared_mutex m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
        // Use SRWLOCK from WinAPI.
        // Minimum supported client = Windows Vista, server = Windows Server 2008.
        class VmaRWMutex
        {
        public:
            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
            void LockRead() { AcquireSRWLockShared(&m_Lock); }
            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
        private:
            SRWLOCK m_Lock;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #else
        // Less efficient fallback: Use normal mutex.
        class VmaRWMutex
        {
        public:
            void LockRead() { m_Mutex.Lock(); }
            void UnlockRead() { m_Mutex.Unlock(); }
            bool TryLockRead() { return m_Mutex.TryLock(); }
            void LockWrite() { m_Mutex.Lock(); }
            void UnlockWrite() { m_Mutex.Unlock(); }
            bool TryLockWrite() { return m_Mutex.TryLock(); }
        private:
            VMA_MUTEX m_Mutex;
        };
        #define VMA_RW_MUTEX VmaRWMutex
    #endif // #if VMA_USE_STL_SHARED_MUTEX
#endif // #ifndef VMA_RW_MUTEX

/*
If providing your own implementation, you need to implement a subset of std::atomic.
*/
#ifndef VMA_ATOMIC_UINT32
    #include <atomic>
    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
#endif

#ifndef VMA_ATOMIC_UINT64
    #include <atomic>
    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
#endif

#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
#endif

#ifndef VMA_DEBUG_ALIGNMENT
    #define VMA_DEBUG_ALIGNMENT (1)
#endif

#ifndef VMA_DEBUG_MARGIN
    #define VMA_DEBUG_MARGIN (0)
#endif

#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
#endif

#ifndef VMA_DEBUG_DETECT_CORRUPTION
    #define VMA_DEBUG_DETECT_CORRUPTION (0)
#endif

#ifndef VMA_DEBUG_GLOBAL_MUTEX
    #define VMA_DEBUG_GLOBAL_MUTEX (0)
#endif

#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
#endif

#ifndef VMA_SMALL_HEAP_MAX_SIZE
    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
#endif

#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
#endif

#ifndef VMA_CLASS_NO_COPY
    #define VMA_CLASS_NO_COPY(className) \
        private: \
            className(const className&) = delete; \
            className& operator=(const className&) = delete;
#endif

static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;

// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;

static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;

/*******************************************************************************
END OF CONFIGURATION
*/

// Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.

static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;

static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;

static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };

// Returns number of bits set to 1 in (v).
static inline uint32_t VmaCountBitsSet(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555);
    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
    c = ((c >> 4) + c) & 0x0F0F0F0F;
    c = ((c >> 8) + c) & 0x00FF00FF;
    c = ((c >> 16) + c) & 0x0000FFFF;
    return c;
}

/*
Returns true if the given number is a power of two.
T must be an unsigned integer, or a signed integer whose value is always nonnegative.
For 0 it returns true.
*/
template <typename T>
inline bool VmaIsPow2(T x)
{
    return (x & (x-1)) == 0;
}

// Aligns given value up to the nearest multiple of alignment. For example: VmaAlignUp(11, 8) = 16.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignUp(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return (val + alignment - 1) & ~(alignment - 1);
}
// Aligns given value down to the nearest multiple of alignment. For example: VmaAlignDown(11, 8) = 8.
// Use types like uint32_t, uint64_t as T.
template <typename T>
static inline T VmaAlignDown(T val, T alignment)
{
    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
    return val & ~(alignment - 1);
}

// Division with mathematical rounding to nearest number.
template <typename T>
static inline T VmaRoundDiv(T x, T y)
{
    return (x + (y / (T)2)) / y;
}

// Returns smallest power of 2 greater or equal to v.
static inline uint32_t VmaNextPow2(uint32_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}
static inline uint64_t VmaNextPow2(uint64_t v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v++;
    return v;
}

// Returns largest power of 2 less or equal to v.
static inline uint32_t VmaPrevPow2(uint32_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v = v ^ (v >> 1);
    return v;
}
static inline uint64_t VmaPrevPow2(uint64_t v)
{
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v |= v >> 32;
    v = v ^ (v >> 1);
    return v;
}

static inline bool VmaStrIsEmpty(const char* pStr)
{
    return pStr == VMA_NULL || *pStr == '\0';
}

#if VMA_STATS_STRING_ENABLED

static const char* VmaAlgorithmToStr(uint32_t algorithm)
{
    switch(algorithm)
    {
    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
        return "Linear";
    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
        return "Buddy";
    case 0:
        return "Default";
    default:
        VMA_ASSERT(0);
        return "";
    }
}

#endif // #if VMA_STATS_STRING_ENABLED

#ifndef VMA_SORT

template<typename Iterator, typename Compare>
Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
{
    Iterator centerValue = end; --centerValue;
    Iterator insertIndex = beg;
    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
    {
        if(cmp(*memTypeIndex, *centerValue))
        {
            if(insertIndex != memTypeIndex)
            {
                VMA_SWAP(*memTypeIndex, *insertIndex);
            }
            ++insertIndex;
        }
    }
    if(insertIndex != centerValue)
    {
        VMA_SWAP(*insertIndex, *centerValue);
    }
    return insertIndex;
}

template<typename Iterator, typename Compare>
void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
{
    if(beg < end)
    {
        Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp);
        VmaQuickSort<Iterator, Compare>(beg, it, cmp);
        VmaQuickSort<Iterator, Compare>(it + 1, end, cmp);
    }
}

#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)

#endif // #ifndef VMA_SORT

/*
Returns true if two memory blocks occupy overlapping pages.
ResourceA must be at a lower memory offset than ResourceB.

Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
*/
static inline bool VmaBlocksOnSamePage(
    VkDeviceSize resourceAOffset,
    VkDeviceSize resourceASize,
    VkDeviceSize resourceBOffset,
    VkDeviceSize pageSize)
{
    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
    VkDeviceSize resourceBStart = resourceBOffset;
    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
    return resourceAEndPage == resourceBStartPage;
}

enum VmaSuballocationType
{
    VMA_SUBALLOCATION_TYPE_FREE = 0,
    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
};

/*
Returns true if given suballocation types could conflict and must respect
VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
or linear image and another one is optimal image. If type is unknown, behave
conservatively.
*/
static inline bool VmaIsBufferImageGranularityConflict(
    VmaSuballocationType suballocType1,
    VmaSuballocationType suballocType2)
{
    if(suballocType1 > suballocType2)
    {
        VMA_SWAP(suballocType1, suballocType2);
    }

    switch(suballocType1)
    {
    case VMA_SUBALLOCATION_TYPE_FREE:
        return false;
    case VMA_SUBALLOCATION_TYPE_UNKNOWN:
        return true;
    case VMA_SUBALLOCATION_TYPE_BUFFER:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
        return
            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
    case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
        return false;
    default:
        VMA_ASSERT(0);
        return true;
    }
}

static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pDst)
    {
        *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
    }
#else
    // no-op
#endif
}

static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
{
#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
    const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
    for(size_t i = 0; i < numberCount; ++i, ++pSrc)
    {
        if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
        {
            return false;
        }
    }
#endif
    return true;
}

/*
Fills structure with parameters of an example buffer to be used for transfers
during GPU memory defragmentation.
*/
static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
{
    memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
    outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
}

// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
struct VmaMutexLock
{
    VMA_CLASS_NO_COPY(VmaMutexLock)
public:
    VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->Lock(); } }
    ~VmaMutexLock()
    { if(m_pMutex) { m_pMutex->Unlock(); } }
private:
    VMA_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
struct VmaMutexLockRead
{
    VMA_CLASS_NO_COPY(VmaMutexLockRead)
public:
    VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockRead(); } }
    ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
struct VmaMutexLockWrite
{
    VMA_CLASS_NO_COPY(VmaMutexLockWrite)
public:
    VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) :
        m_pMutex(useMutex ? &mutex : VMA_NULL)
    { if(m_pMutex) { m_pMutex->LockWrite(); } }
    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
private:
    VMA_RW_MUTEX* m_pMutex;
};

#if VMA_DEBUG_GLOBAL_MUTEX
    static VMA_MUTEX gDebugGlobalMutex;
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
#else
    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
#endif

// Minimum size of a free suballocation to register it in the free suballocation collection.
static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;

/*
Performs binary search and returns iterator to the first element that is greater
than or equal to (key), according to comparison (cmp).

Cmp should return true if its first argument is less than its second argument.

The returned iterator points to the found element if it is present in the
collection, or to the place where a new element with value (key) should be inserted.
*/
template <typename CmpLess, typename IterT, typename KeyT>
static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
{
    size_t down = 0, up = (end - beg);
    while(down < up)
    {
        const size_t mid = (down + up) / 2;
        if(cmp(*(beg+mid), key))
        {
            down = mid + 1;
        }
        else
        {
            up = mid;
        }
    }
    return beg + down;
}

template<typename CmpLess, typename IterT, typename KeyT>
IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
{
    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
        beg, end, value, cmp);
    if(it == end ||
        (!cmp(*it, value) && !cmp(value, *it)))
    {
        return it;
    }
    return end;
}

/*
Returns true if all pointers in the array are non-null and unique.
Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
T must be a pointer type, e.g. VmaAllocation, VmaPool.
*/
template<typename T>
static bool VmaValidatePointerArray(uint32_t count, const T* arr)
{
    for(uint32_t i = 0; i < count; ++i)
    {
        const T iPtr = arr[i];
        if(iPtr == VMA_NULL)
        {
            return false;
        }
        for(uint32_t j = i + 1; j < count; ++j)
        {
            if(iPtr == arr[j])
            {
                return false;
            }
        }
    }
    return true;
}

template<typename MainT, typename NewT>
static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
{
    newStruct->pNext = mainStruct->pNext;
    mainStruct->pNext = newStruct;
}

// Memory allocation

static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
{
    void* result = VMA_NULL;
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
    {
        result = (*pAllocationCallbacks->pfnAllocation)(
            pAllocationCallbacks->pUserData,
            size,
            alignment,
            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
    }
    else
    {
        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
    }
    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
    return result;
}

static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
{
    if((pAllocationCallbacks != VMA_NULL) &&
        (pAllocationCallbacks->pfnFree != VMA_NULL))
    {
        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
    }
    else
    {
        VMA_SYSTEM_ALIGNED_FREE(ptr);
    }
}

template<typename T>
static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
}

template<typename T>
static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
{
    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
}

#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)

#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)

template<typename T>
static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
{
    ptr->~T();
    VmaFree(pAllocationCallbacks, ptr);
}

template<typename T>
static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
{
    if(ptr != VMA_NULL)
    {
        for(size_t i = count; i--; )
        {
            ptr[i].~T();
        }
        VmaFree(pAllocationCallbacks, ptr);
    }
}

static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
{
    if(srcStr != VMA_NULL)
    {
        const size_t len = strlen(srcStr);
        char* const result = vma_new_array(allocs, char, len + 1);
        memcpy(result, srcStr, len + 1);
        return result;
    }
    else
    {
        return VMA_NULL;
    }
}

static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
{
    if(str != VMA_NULL)
    {
        const size_t len = strlen(str);
        vma_delete_array(allocs, str, len + 1);
    }
}

// STL-compatible allocator.
template<typename T>
class VmaStlAllocator
{
public:
    const VkAllocationCallbacks* const m_pCallbacks;
    typedef T value_type;

    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }

    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }

    template<typename U>
    bool operator==(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks == rhs.m_pCallbacks;
    }
    template<typename U>
    bool operator!=(const VmaStlAllocator<U>& rhs) const
    {
        return m_pCallbacks != rhs.m_pCallbacks;
    }

    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
};

#if VMA_USE_STL_VECTOR

#define VmaVector std::vector

template<typename T, typename allocatorT>
static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
{
    vec.insert(vec.begin() + index, item);
}

template<typename T, typename allocatorT>
static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
{
    vec.erase(vec.begin() + index);
}

#else // #if VMA_USE_STL_VECTOR

4925 /* Class with interface compatible with subset of std::vector.
4926 T must be POD because constructors and destructors are not called and memcpy is
4927 used for these objects. */
4928 template<typename T, typename AllocatorT>
4929 class VmaVector
4930 {
4931 public:
4932  typedef T value_type;
4933 
4934  VmaVector(const AllocatorT& allocator) :
4935  m_Allocator(allocator),
4936  m_pArray(VMA_NULL),
4937  m_Count(0),
4938  m_Capacity(0)
4939  {
4940  }
4941 
4942  VmaVector(size_t count, const AllocatorT& allocator) :
4943  m_Allocator(allocator),
4944  m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
4945  m_Count(count),
4946  m_Capacity(count)
4947  {
4948  }
4949 
4950  // This version of the constructor is here for compatibility with pre-C++14 std::vector.
4951  // value is unused.
4952  VmaVector(size_t count, const T& value, const AllocatorT& allocator)
4953  : VmaVector(count, allocator) {}
4954 
4955  VmaVector(const VmaVector<T, AllocatorT>& src) :
4956  m_Allocator(src.m_Allocator),
4957  m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
4958  m_Count(src.m_Count),
4959  m_Capacity(src.m_Count)
4960  {
4961  if(m_Count != 0)
4962  {
4963  memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
4964  }
4965  }
4966 
4967  ~VmaVector()
4968  {
4969  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
4970  }
4971 
4972  VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
4973  {
4974  if(&rhs != this)
4975  {
4976  resize(rhs.m_Count);
4977  if(m_Count != 0)
4978  {
4979  memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
4980  }
4981  }
4982  return *this;
4983  }
4984 
4985  bool empty() const { return m_Count == 0; }
4986  size_t size() const { return m_Count; }
4987  T* data() { return m_pArray; }
4988  const T* data() const { return m_pArray; }
4989 
4990  T& operator[](size_t index)
4991  {
4992  VMA_HEAVY_ASSERT(index < m_Count);
4993  return m_pArray[index];
4994  }
4995  const T& operator[](size_t index) const
4996  {
4997  VMA_HEAVY_ASSERT(index < m_Count);
4998  return m_pArray[index];
4999  }
5000 
5001  T& front()
5002  {
5003  VMA_HEAVY_ASSERT(m_Count > 0);
5004  return m_pArray[0];
5005  }
5006  const T& front() const
5007  {
5008  VMA_HEAVY_ASSERT(m_Count > 0);
5009  return m_pArray[0];
5010  }
5011  T& back()
5012  {
5013  VMA_HEAVY_ASSERT(m_Count > 0);
5014  return m_pArray[m_Count - 1];
5015  }
5016  const T& back() const
5017  {
5018  VMA_HEAVY_ASSERT(m_Count > 0);
5019  return m_pArray[m_Count - 1];
5020  }
5021 
5022  void reserve(size_t newCapacity, bool freeMemory = false)
5023  {
5024  newCapacity = VMA_MAX(newCapacity, m_Count);
5025 
5026  if((newCapacity < m_Capacity) && !freeMemory)
5027  {
5028  newCapacity = m_Capacity;
5029  }
5030 
5031  if(newCapacity != m_Capacity)
5032  {
5033  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5034  if(m_Count != 0)
5035  {
5036  memcpy(newArray, m_pArray, m_Count * sizeof(T));
5037  }
5038  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5039  m_Capacity = newCapacity;
5040  m_pArray = newArray;
5041  }
5042  }
5043 
5044  void resize(size_t newCount, bool freeMemory = false)
5045  {
5046  size_t newCapacity = m_Capacity;
5047  if(newCount > m_Capacity)
5048  {
5049  newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
5050  }
5051  else if(freeMemory)
5052  {
5053  newCapacity = newCount;
5054  }
5055 
5056  if(newCapacity != m_Capacity)
5057  {
5058  T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
5059  const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
5060  if(elementsToCopy != 0)
5061  {
5062  memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
5063  }
5064  VmaFree(m_Allocator.m_pCallbacks, m_pArray);
5065  m_Capacity = newCapacity;
5066  m_pArray = newArray;
5067  }
5068 
5069  m_Count = newCount;
5070  }
5071 
5072  void clear(bool freeMemory = false)
5073  {
5074  resize(0, freeMemory);
5075  }
5076 
5077  void insert(size_t index, const T& src)
5078  {
5079  VMA_HEAVY_ASSERT(index <= m_Count);
5080  const size_t oldCount = size();
5081  resize(oldCount + 1);
5082  if(index < oldCount)
5083  {
5084  memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
5085  }
5086  m_pArray[index] = src;
5087  }
5088 
5089  void remove(size_t index)
5090  {
5091  VMA_HEAVY_ASSERT(index < m_Count);
5092  const size_t oldCount = size();
5093  if(index < oldCount - 1)
5094  {
5095  memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
5096  }
5097  resize(oldCount - 1);
5098  }
5099 
5100  void push_back(const T& src)
5101  {
5102  const size_t newIndex = size();
5103  resize(newIndex + 1);
5104  m_pArray[newIndex] = src;
5105  }
5106 
5107  void pop_back()
5108  {
5109  VMA_HEAVY_ASSERT(m_Count > 0);
5110  resize(size() - 1);
5111  }
5112 
5113  void push_front(const T& src)
5114  {
5115  insert(0, src);
5116  }
5117 
5118  void pop_front()
5119  {
5120  VMA_HEAVY_ASSERT(m_Count > 0);
5121  remove(0);
5122  }
5123 
5124  typedef T* iterator;
5125 
5126  iterator begin() { return m_pArray; }
5127  iterator end() { return m_pArray + m_Count; }
5128 
5129 private:
5130  AllocatorT m_Allocator;
5131  T* m_pArray;
5132  size_t m_Count;
5133  size_t m_Capacity;
5134 };
5135 
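// Example (sketch): VmaVector mirrors the std::vector subset used internally,
// with index-based insert()/remove() instead of iterator overloads. As noted
// above, T must be POD, because growth and insertion move elements with memcpy.
static void VmaVectorExample(const VkAllocationCallbacks* allocs)
{
    const VmaStlAllocator<uint32_t> stlAlloc(allocs);
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > v(stlAlloc);
    v.push_back(1); // { 1 }
    v.insert(0, 2); // { 2, 1 }
    v.remove(1);    // { 2 }
}
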
5136 template<typename T, typename allocatorT>
5137 static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
5138 {
5139  vec.insert(index, item);
5140 }
5141 
5142 template<typename T, typename allocatorT>
5143 static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
5144 {
5145  vec.remove(index);
5146 }
5147 
5148 #endif // #if VMA_USE_STL_VECTOR
5149 
5150 template<typename CmpLess, typename VectorT>
5151 size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
5152 {
5153  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
5154  vector.data(),
5155  vector.data() + vector.size(),
5156  value,
5157  CmpLess()) - vector.data();
5158  VmaVectorInsert(vector, indexToInsert, value);
5159  return indexToInsert;
5160 }
5161 
5162 template<typename CmpLess, typename VectorT>
5163 bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
5164 {
5165  CmpLess comparator;
5166  typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
5167  vector.begin(),
5168  vector.end(),
5169  value,
5170  comparator);
5171  if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
5172  {
5173  size_t indexToRemove = it - vector.begin();
5174  VmaVectorRemove(vector, indexToRemove);
5175  return true;
5176  }
5177  return false;
5178 }
5179 
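// Example (sketch): together these helpers keep a vector sorted under a strict
// weak ordering. VmaExampleLess is a hypothetical comparator for this sketch.
struct VmaExampleLess
{
    bool operator()(uint32_t lhs, uint32_t rhs) const { return lhs < rhs; }
};

static void VmaSortedVectorExample(const VkAllocationCallbacks* allocs)
{
    const VmaStlAllocator<uint32_t> stlAlloc(allocs);
    VmaVector< uint32_t, VmaStlAllocator<uint32_t> > sorted(stlAlloc);
    VmaVectorInsertSorted<VmaExampleLess>(sorted, 5); // { 5 }
    VmaVectorInsertSorted<VmaExampleLess>(sorted, 2); // { 2, 5 } - binary search picks index 0
    VmaVectorRemoveSorted<VmaExampleLess>(sorted, 5); // { 2 } - found and removed, returns true
}
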
5180 ////////////////////////////////////////////////////////////////////////////////
5181 // class VmaSmallVector
5182 
5183 /*
5184 This is a vector (a variable-sized array), optimized for the case when the array is small.
5185 
5186 It contains some number of elements in-place, which allows it to avoid heap allocation
5187 when the actual number of elements is below that threshold. This allows normal "small"
5188 cases to be fast without losing generality for large inputs.
5189 */
5190 
5191 template<typename T, typename AllocatorT, size_t N>
5192 class VmaSmallVector
5193 {
5194 public:
5195  typedef T value_type;
5196 
5197  VmaSmallVector(const AllocatorT& allocator) :
5198  m_Count(0),
5199  m_DynamicArray(allocator)
5200  {
5201  }
5202  VmaSmallVector(size_t count, const AllocatorT& allocator) :
5203  m_Count(count),
5204  m_DynamicArray(count > N ? count : 0, allocator)
5205  {
5206  }
5207  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5208  VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& src) = delete;
5209  template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
5210  VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>& rhs) = delete;
5211 
5212  bool empty() const { return m_Count == 0; }
5213  size_t size() const { return m_Count; }
5214  T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5215  const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
5216 
5217  T& operator[](size_t index)
5218  {
5219  VMA_HEAVY_ASSERT(index < m_Count);
5220  return data()[index];
5221  }
5222  const T& operator[](size_t index) const
5223  {
5224  VMA_HEAVY_ASSERT(index < m_Count);
5225  return data()[index];
5226  }
5227 
5228  T& front()
5229  {
5230  VMA_HEAVY_ASSERT(m_Count > 0);
5231  return data()[0];
5232  }
5233  const T& front() const
5234  {
5235  VMA_HEAVY_ASSERT(m_Count > 0);
5236  return data()[0];
5237  }
5238  T& back()
5239  {
5240  VMA_HEAVY_ASSERT(m_Count > 0);
5241  return data()[m_Count - 1];
5242  }
5243  const T& back() const
5244  {
5245  VMA_HEAVY_ASSERT(m_Count > 0);
5246  return data()[m_Count - 1];
5247  }
5248 
5249  void resize(size_t newCount, bool freeMemory = false)
5250  {
5251  if(newCount > N && m_Count > N)
5252  {
5253  // Any direction, staying in m_DynamicArray
5254  m_DynamicArray.resize(newCount, freeMemory);
5255  }
5256  else if(newCount > N && m_Count <= N)
5257  {
5258  // Growing, moving from m_StaticArray to m_DynamicArray
5259  m_DynamicArray.resize(newCount, freeMemory);
5260  if(m_Count > 0)
5261  {
5262  memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
5263  }
5264  }
5265  else if(newCount <= N && m_Count > N)
5266  {
5267  // Shrinking, moving from m_DynamicArray to m_StaticArray
5268  if(newCount > 0)
5269  {
5270  memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
5271  }
5272  m_DynamicArray.resize(0, freeMemory);
5273  }
5274  else
5275  {
5276  // Any direction, staying in m_StaticArray - nothing to do here
5277  }
5278  m_Count = newCount;
5279  }
5280 
5281  void clear(bool freeMemory = false)
5282  {
5283  m_DynamicArray.clear(freeMemory);
5284  m_Count = 0;
5285  }
5286 
5287  void insert(size_t index, const T& src)
5288  {
5289  VMA_HEAVY_ASSERT(index <= m_Count);
5290  const size_t oldCount = size();
5291  resize(oldCount + 1);
5292  T* const dataPtr = data();
5293  if(index < oldCount)
5294  {
5295  // Suboptimal: when resize() has just moved the data from m_StaticArray to m_DynamicArray, this shift could be a single memcpy straight from m_StaticArray instead.
5296  memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
5297  }
5298  dataPtr[index] = src;
5299  }
5300 
5301  void remove(size_t index)
5302  {
5303  VMA_HEAVY_ASSERT(index < m_Count);
5304  const size_t oldCount = size();
5305  if(index < oldCount - 1)
5306  {
5307  // Suboptimal: when the following resize() moves the data from m_DynamicArray back to m_StaticArray, this shift could be a single memcpy straight into m_StaticArray instead.
5308  T* const dataPtr = data();
5309  memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
5310  }
5311  resize(oldCount - 1);
5312  }
5313 
5314  void push_back(const T& src)
5315  {
5316  const size_t newIndex = size();
5317  resize(newIndex + 1);
5318  data()[newIndex] = src;
5319  }
5320 
5321  void pop_back()
5322  {
5323  VMA_HEAVY_ASSERT(m_Count > 0);
5324  resize(size() - 1);
5325  }
5326 
5327  void push_front(const T& src)
5328  {
5329  insert(0, src);
5330  }
5331 
5332  void pop_front()
5333  {
5334  VMA_HEAVY_ASSERT(m_Count > 0);
5335  remove(0);
5336  }
5337 
5338  typedef T* iterator;
5339 
5340  iterator begin() { return data(); }
5341  iterator end() { return data() + m_Count; }
5342 
5343 private:
5344  size_t m_Count;
5345  T m_StaticArray[N]; // Used when m_Count <= N
5346  VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Count > N
5347 };
5348 
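// Example (sketch): with N = 8, up to 8 elements live in m_StaticArray at no
// heap cost; the 9th element moves the contents into m_DynamicArray.
static void VmaSmallVectorExample(const VkAllocationCallbacks* allocs)
{
    const VmaStlAllocator<uint32_t> stlAlloc(allocs);
    VmaSmallVector< uint32_t, VmaStlAllocator<uint32_t>, 8 > sv(stlAlloc);
    for(uint32_t i = 0; i < 9; ++i)
    {
        sv.push_back(i); // the heap is touched only on the last iteration
    }
}
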
5349 ////////////////////////////////////////////////////////////////////////////////
5350 // class VmaPoolAllocator
5351 
5352 /*
5353 Allocator for objects of type T using a list of arrays (pools) to speed up
5354 allocation. The number of elements that can be allocated is not bounded, because
5355 the allocator can create multiple blocks.
5356 */
5357 template<typename T>
5358 class VmaPoolAllocator
5359 {
5360  VMA_CLASS_NO_COPY(VmaPoolAllocator)
5361 public:
5362  VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
5363  ~VmaPoolAllocator();
5364  template<typename... Types> T* Alloc(Types... args);
5365  void Free(T* ptr);
5366 
5367 private:
5368  union Item
5369  {
5370  uint32_t NextFreeIndex;
5371  alignas(T) char Value[sizeof(T)];
5372  };
5373 
5374  struct ItemBlock
5375  {
5376  Item* pItems;
5377  uint32_t Capacity;
5378  uint32_t FirstFreeIndex;
5379  };
5380 
5381  const VkAllocationCallbacks* m_pAllocationCallbacks;
5382  const uint32_t m_FirstBlockCapacity;
5383  VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
5384 
5385  ItemBlock& CreateNewBlock();
5386 };
5387 
5388 template<typename T>
5389 VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
5390  m_pAllocationCallbacks(pAllocationCallbacks),
5391  m_FirstBlockCapacity(firstBlockCapacity),
5392  m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
5393 {
5394  VMA_ASSERT(m_FirstBlockCapacity > 1);
5395 }
5396 
5397 template<typename T>
5398 VmaPoolAllocator<T>::~VmaPoolAllocator()
5399 {
5400  for(size_t i = m_ItemBlocks.size(); i--; )
5401  vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
5402  m_ItemBlocks.clear();
5403 }
5404 
5405 template<typename T>
5406 template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
5407 {
5408  for(size_t i = m_ItemBlocks.size(); i--; )
5409  {
5410  ItemBlock& block = m_ItemBlocks[i];
5411  // This block has some free items: use the first one.
5412  if(block.FirstFreeIndex != UINT32_MAX)
5413  {
5414  Item* const pItem = &block.pItems[block.FirstFreeIndex];
5415  block.FirstFreeIndex = pItem->NextFreeIndex;
5416  T* result = (T*)&pItem->Value;
5417  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5418  return result;
5419  }
5420  }
5421 
5422  // No block has a free item: create a new one and use it.
5423  ItemBlock& newBlock = CreateNewBlock();
5424  Item* const pItem = &newBlock.pItems[0];
5425  newBlock.FirstFreeIndex = pItem->NextFreeIndex;
5426  T* result = (T*)&pItem->Value;
5427  new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
5428  return result;
5429 }
5430 
5431 template<typename T>
5432 void VmaPoolAllocator<T>::Free(T* ptr)
5433 {
5434  // Search all memory blocks to find ptr.
5435  for(size_t i = m_ItemBlocks.size(); i--; )
5436  {
5437  ItemBlock& block = m_ItemBlocks[i];
5438 
5439  // Casting to union.
5440  Item* pItemPtr;
5441  memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
5442 
5443  // Check if pItemPtr is in address range of this block.
5444  if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
5445  {
5446  ptr->~T(); // Explicit destructor call.
5447  const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
5448  pItemPtr->NextFreeIndex = block.FirstFreeIndex;
5449  block.FirstFreeIndex = index;
5450  return;
5451  }
5452  }
5453  VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
5454 }
5455 
5456 template<typename T>
5457 typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
5458 {
5459  const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
5460  m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
5461 
5462  const ItemBlock newBlock = {
5463  vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
5464  newBlockCapacity,
5465  0 };
5466 
5467  m_ItemBlocks.push_back(newBlock);
5468 
5469  // Set up the singly-linked list of all free items in this block.
5470  for(uint32_t i = 0; i < newBlockCapacity - 1; ++i)
5471  newBlock.pItems[i].NextFreeIndex = i + 1;
5472  newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
5473  return m_ItemBlocks.back();
5474 }
5475 
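// Example (sketch): VmaPoolAllocator hands out fixed-size items in O(1) using
// the per-block free list above; Alloc() forwards constructor arguments and
// Free() runs the destructor before relinking the item as free.
// VmaPoolExampleItem is a hypothetical type introduced only for this sketch.
struct VmaPoolExampleItem { uint32_t id; float payload; };

static void VmaPoolAllocatorExample(const VkAllocationCallbacks* allocs)
{
    VmaPoolAllocator<VmaPoolExampleItem> pool(allocs, 128); // first block holds 128 items
    VmaPoolExampleItem* const pItem = pool.Alloc();
    pItem->id = 42;
    pool.Free(pItem); // the item slot is recycled by the next Alloc()
}
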
5476 ////////////////////////////////////////////////////////////////////////////////
5477 // class VmaRawList, VmaList
5478 
5479 #if VMA_USE_STL_LIST
5480 
5481 #define VmaList std::list
5482 
5483 #else // #if VMA_USE_STL_LIST
5484 
5485 template<typename T>
5486 struct VmaListItem
5487 {
5488  VmaListItem* pPrev;
5489  VmaListItem* pNext;
5490  T Value;
5491 };
5492 
5493 // Doubly linked list.
5494 template<typename T>
5495 class VmaRawList
5496 {
5497  VMA_CLASS_NO_COPY(VmaRawList)
5498 public:
5499  typedef VmaListItem<T> ItemType;
5500 
5501  VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
5502  ~VmaRawList();
5503  void Clear();
5504 
5505  size_t GetCount() const { return m_Count; }
5506  bool IsEmpty() const { return m_Count == 0; }
5507 
5508  ItemType* Front() { return m_pFront; }
5509  const ItemType* Front() const { return m_pFront; }
5510  ItemType* Back() { return m_pBack; }
5511  const ItemType* Back() const { return m_pBack; }
5512 
5513  ItemType* PushBack();
5514  ItemType* PushFront();
5515  ItemType* PushBack(const T& value);
5516  ItemType* PushFront(const T& value);
5517  void PopBack();
5518  void PopFront();
5519 
5520  // pItem can be null - then it is equivalent to PushBack.
5521  ItemType* InsertBefore(ItemType* pItem);
5522  // pItem can be null - then it is equivalent to PushFront.
5523  ItemType* InsertAfter(ItemType* pItem);
5524 
5525  ItemType* InsertBefore(ItemType* pItem, const T& value);
5526  ItemType* InsertAfter(ItemType* pItem, const T& value);
5527 
5528  void Remove(ItemType* pItem);
5529 
5530 private:
5531  const VkAllocationCallbacks* const m_pAllocationCallbacks;
5532  VmaPoolAllocator<ItemType> m_ItemAllocator;
5533  ItemType* m_pFront;
5534  ItemType* m_pBack;
5535  size_t m_Count;
5536 };
5537 
5538 template<typename T>
5539 VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) :
5540  m_pAllocationCallbacks(pAllocationCallbacks),
5541  m_ItemAllocator(pAllocationCallbacks, 128),
5542  m_pFront(VMA_NULL),
5543  m_pBack(VMA_NULL),
5544  m_Count(0)
5545 {
5546 }
5547 
5548 template<typename T>
5549 VmaRawList<T>::~VmaRawList()
5550 {
5551  // Intentionally not calling Clear, because that would waste work
5552  // returning every item to m_ItemAllocator as free.
5553 }
5554 
5555 template<typename T>
5556 void VmaRawList<T>::Clear()
5557 {
5558  if(IsEmpty() == false)
5559  {
5560  ItemType* pItem = m_pBack;
5561  while(pItem != VMA_NULL)
5562  {
5563  ItemType* const pPrevItem = pItem->pPrev;
5564  m_ItemAllocator.Free(pItem);
5565  pItem = pPrevItem;
5566  }
5567  m_pFront = VMA_NULL;
5568  m_pBack = VMA_NULL;
5569  m_Count = 0;
5570  }
5571 }
5572 
5573 template<typename T>
5574 VmaListItem<T>* VmaRawList<T>::PushBack()
5575 {
5576  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5577  pNewItem->pNext = VMA_NULL;
5578  if(IsEmpty())
5579  {
5580  pNewItem->pPrev = VMA_NULL;
5581  m_pFront = pNewItem;
5582  m_pBack = pNewItem;
5583  m_Count = 1;
5584  }
5585  else
5586  {
5587  pNewItem->pPrev = m_pBack;
5588  m_pBack->pNext = pNewItem;
5589  m_pBack = pNewItem;
5590  ++m_Count;
5591  }
5592  return pNewItem;
5593 }
5594 
5595 template<typename T>
5596 VmaListItem<T>* VmaRawList<T>::PushFront()
5597 {
5598  ItemType* const pNewItem = m_ItemAllocator.Alloc();
5599  pNewItem->pPrev = VMA_NULL;
5600  if(IsEmpty())
5601  {
5602  pNewItem->pNext = VMA_NULL;
5603  m_pFront = pNewItem;
5604  m_pBack = pNewItem;
5605  m_Count = 1;
5606  }
5607  else
5608  {
5609  pNewItem->pNext = m_pFront;
5610  m_pFront->pPrev = pNewItem;
5611  m_pFront = pNewItem;
5612  ++m_Count;
5613  }
5614  return pNewItem;
5615 }
5616 
5617 template<typename T>
5618 VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
5619 {
5620  ItemType* const pNewItem = PushBack();
5621  pNewItem->Value = value;
5622  return pNewItem;
5623 }
5624 
5625 template<typename T>
5626 VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
5627 {
5628  ItemType* const pNewItem = PushFront();
5629  pNewItem->Value = value;
5630  return pNewItem;
5631 }
5632 
5633 template<typename T>
5634 void VmaRawList<T>::PopBack()
5635 {
5636  VMA_HEAVY_ASSERT(m_Count > 0);
5637  ItemType* const pBackItem = m_pBack;
5638  ItemType* const pPrevItem = pBackItem->pPrev;
5639  if(pPrevItem != VMA_NULL)
5640  {
5641  pPrevItem->pNext = VMA_NULL;
5642  }
5643  m_pBack = pPrevItem;
5644  m_ItemAllocator.Free(pBackItem);
5645  --m_Count;
5646 }
5647 
5648 template<typename T>
5649 void VmaRawList<T>::PopFront()
5650 {
5651  VMA_HEAVY_ASSERT(m_Count > 0);
5652  ItemType* const pFrontItem = m_pFront;
5653  ItemType* const pNextItem = pFrontItem->pNext;
5654  if(pNextItem != VMA_NULL)
5655  {
5656  pNextItem->pPrev = VMA_NULL;
5657  }
5658  m_pFront = pNextItem;
5659  m_ItemAllocator.Free(pFrontItem);
5660  --m_Count;
5661 }
5662 
5663 template<typename T>
5664 void VmaRawList<T>::Remove(ItemType* pItem)
5665 {
5666  VMA_HEAVY_ASSERT(pItem != VMA_NULL);
5667  VMA_HEAVY_ASSERT(m_Count > 0);
5668 
5669  if(pItem->pPrev != VMA_NULL)
5670  {
5671  pItem->pPrev->pNext = pItem->pNext;
5672  }
5673  else
5674  {
5675  VMA_HEAVY_ASSERT(m_pFront == pItem);
5676  m_pFront = pItem->pNext;
5677  }
5678 
5679  if(pItem->pNext != VMA_NULL)
5680  {
5681  pItem->pNext->pPrev = pItem->pPrev;
5682  }
5683  else
5684  {
5685  VMA_HEAVY_ASSERT(m_pBack == pItem);
5686  m_pBack = pItem->pPrev;
5687  }
5688 
5689  m_ItemAllocator.Free(pItem);
5690  --m_Count;
5691 }
5692 
5693 template<typename T>
5694 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
5695 {
5696  if(pItem != VMA_NULL)
5697  {
5698  ItemType* const prevItem = pItem->pPrev;
5699  ItemType* const newItem = m_ItemAllocator.Alloc();
5700  newItem->pPrev = prevItem;
5701  newItem->pNext = pItem;
5702  pItem->pPrev = newItem;
5703  if(prevItem != VMA_NULL)
5704  {
5705  prevItem->pNext = newItem;
5706  }
5707  else
5708  {
5709  VMA_HEAVY_ASSERT(m_pFront == pItem);
5710  m_pFront = newItem;
5711  }
5712  ++m_Count;
5713  return newItem;
5714  }
5715  else
5716  return PushBack();
5717 }
5718 
5719 template<typename T>
5720 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
5721 {
5722  if(pItem != VMA_NULL)
5723  {
5724  ItemType* const nextItem = pItem->pNext;
5725  ItemType* const newItem = m_ItemAllocator.Alloc();
5726  newItem->pNext = nextItem;
5727  newItem->pPrev = pItem;
5728  pItem->pNext = newItem;
5729  if(nextItem != VMA_NULL)
5730  {
5731  nextItem->pPrev = newItem;
5732  }
5733  else
5734  {
5735  VMA_HEAVY_ASSERT(m_pBack == pItem);
5736  m_pBack = newItem;
5737  }
5738  ++m_Count;
5739  return newItem;
5740  }
5741  else
5742  return PushFront();
5743 }
5744 
5745 template<typename T>
5746 VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
5747 {
5748  ItemType* const newItem = InsertBefore(pItem);
5749  newItem->Value = value;
5750  return newItem;
5751 }
5752 
5753 template<typename T>
5754 VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
5755 {
5756  ItemType* const newItem = InsertAfter(pItem);
5757  newItem->Value = value;
5758  return newItem;
5759 }
5760 
5761 template<typename T, typename AllocatorT>
5762 class VmaList
5763 {
5764  VMA_CLASS_NO_COPY(VmaList)
5765 public:
5766  class iterator
5767  {
5768  public:
5769  iterator() :
5770  m_pList(VMA_NULL),
5771  m_pItem(VMA_NULL)
5772  {
5773  }
5774 
5775  T& operator*() const
5776  {
5777  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5778  return m_pItem->Value;
5779  }
5780  T* operator->() const
5781  {
5782  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5783  return &m_pItem->Value;
5784  }
5785 
5786  iterator& operator++()
5787  {
5788  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5789  m_pItem = m_pItem->pNext;
5790  return *this;
5791  }
5792  iterator& operator--()
5793  {
5794  if(m_pItem != VMA_NULL)
5795  {
5796  m_pItem = m_pItem->pPrev;
5797  }
5798  else
5799  {
5800  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5801  m_pItem = m_pList->Back();
5802  }
5803  return *this;
5804  }
5805 
5806  iterator operator++(int)
5807  {
5808  iterator result = *this;
5809  ++*this;
5810  return result;
5811  }
5812  iterator operator--(int)
5813  {
5814  iterator result = *this;
5815  --*this;
5816  return result;
5817  }
5818 
5819  bool operator==(const iterator& rhs) const
5820  {
5821  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5822  return m_pItem == rhs.m_pItem;
5823  }
5824  bool operator!=(const iterator& rhs) const
5825  {
5826  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5827  return m_pItem != rhs.m_pItem;
5828  }
5829 
5830  private:
5831  VmaRawList<T>* m_pList;
5832  VmaListItem<T>* m_pItem;
5833 
5834  iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) :
5835  m_pList(pList),
5836  m_pItem(pItem)
5837  {
5838  }
5839 
5840  friend class VmaList<T, AllocatorT>;
5841  };
5842 
5843  class const_iterator
5844  {
5845  public:
5846  const_iterator() :
5847  m_pList(VMA_NULL),
5848  m_pItem(VMA_NULL)
5849  {
5850  }
5851 
5852  const_iterator(const iterator& src) :
5853  m_pList(src.m_pList),
5854  m_pItem(src.m_pItem)
5855  {
5856  }
5857 
5858  const T& operator*() const
5859  {
5860  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5861  return m_pItem->Value;
5862  }
5863  const T* operator->() const
5864  {
5865  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5866  return &m_pItem->Value;
5867  }
5868 
5869  const_iterator& operator++()
5870  {
5871  VMA_HEAVY_ASSERT(m_pItem != VMA_NULL);
5872  m_pItem = m_pItem->pNext;
5873  return *this;
5874  }
5875  const_iterator& operator--()
5876  {
5877  if(m_pItem != VMA_NULL)
5878  {
5879  m_pItem = m_pItem->pPrev;
5880  }
5881  else
5882  {
5883  VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
5884  m_pItem = m_pList->Back();
5885  }
5886  return *this;
5887  }
5888 
5889  const_iterator operator++(int)
5890  {
5891  const_iterator result = *this;
5892  ++*this;
5893  return result;
5894  }
5895  const_iterator operator--(int)
5896  {
5897  const_iterator result = *this;
5898  --*this;
5899  return result;
5900  }
5901 
5902  bool operator==(const const_iterator& rhs) const
5903  {
5904  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5905  return m_pItem == rhs.m_pItem;
5906  }
5907  bool operator!=(const const_iterator& rhs) const
5908  {
5909  VMA_HEAVY_ASSERT(m_pList == rhs.m_pList);
5910  return m_pItem != rhs.m_pItem;
5911  }
5912 
5913  private:
5914  const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) :
5915  m_pList(pList),
5916  m_pItem(pItem)
5917  {
5918  }
5919 
5920  const VmaRawList<T>* m_pList;
5921  const VmaListItem<T>* m_pItem;
5922 
5923  friend class VmaList<T, AllocatorT>;
5924  };
5925 
5926  VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { }
5927 
5928  bool empty() const { return m_RawList.IsEmpty(); }
5929  size_t size() const { return m_RawList.GetCount(); }
5930 
5931  iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
5932  iterator end() { return iterator(&m_RawList, VMA_NULL); }
5933 
5934  const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
5935  const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
5936 
5937  void clear() { m_RawList.Clear(); }
5938  void push_back(const T& value) { m_RawList.PushBack(value); }
5939  void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
5940  iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
5941 
5942 private:
5943  VmaRawList<T> m_RawList;
5944 };
5945 
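// Example (sketch): VmaList exposes the std::list subset the library needs -
// stable iterators and O(1) insert/erase - backed by VmaRawList and, in turn,
// by VmaPoolAllocator items.
static void VmaListExample(const VkAllocationCallbacks* allocs)
{
    typedef VmaList< uint32_t, VmaStlAllocator<uint32_t> > ListT;
    const VmaStlAllocator<uint32_t> stlAlloc(allocs);
    ListT list(stlAlloc);
    list.push_back(1);
    list.push_back(2);
    for(ListT::iterator it = list.begin(); it != list.end(); ++it)
    {
        // *it visits 1, then 2.
    }
}
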
5946 #endif // #if VMA_USE_STL_LIST
5947 
5948 ////////////////////////////////////////////////////////////////////////////////
5949 // class VmaMap
5950 
5951 // Unused in this version.
5952 #if 0
5953 
5954 #if VMA_USE_STL_UNORDERED_MAP
5955 
5956 #define VmaPair std::pair
5957 
5958 #define VMA_MAP_TYPE(KeyT, ValueT) \
5959  std::unordered_map< KeyT, ValueT, std::hash<KeyT>, std::equal_to<KeyT>, VmaStlAllocator< std::pair<KeyT, ValueT> > >
5960 
5961 #else // #if VMA_USE_STL_UNORDERED_MAP
5962 
5963 template<typename T1, typename T2>
5964 struct VmaPair
5965 {
5966  T1 first;
5967  T2 second;
5968 
5969  VmaPair() : first(), second() { }
5970  VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { }
5971 };
5972 
5973 /* Class compatible with subset of interface of std::unordered_map.
5974 KeyT, ValueT must be POD because they will be stored in VmaVector.
5975 */
5976 template<typename KeyT, typename ValueT>
5977 class VmaMap
5978 {
5979 public:
5980  typedef VmaPair<KeyT, ValueT> PairType;
5981  typedef PairType* iterator;
5982 
5983  VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) { }
5984 
5985  iterator begin() { return m_Vector.begin(); }
5986  iterator end() { return m_Vector.end(); }
5987 
5988  void insert(const PairType& pair);
5989  iterator find(const KeyT& key);
5990  void erase(iterator it);
5991 
5992 private:
5993  VmaVector< PairType, VmaStlAllocator<PairType> > m_Vector;
5994 };
5995 
5996 #define VMA_MAP_TYPE(KeyT, ValueT) VmaMap<KeyT, ValueT>
5997 
5998 template<typename FirstT, typename SecondT>
5999 struct VmaPairFirstLess
6000 {
6001  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
6002  {
6003  return lhs.first < rhs.first;
6004  }
6005  bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
6006  {
6007  return lhs.first < rhsFirst;
6008  }
6009 };
6010 
6011 template<typename KeyT, typename ValueT>
6012 void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
6013 {
6014  const size_t indexToInsert = VmaBinaryFindFirstNotLess(
6015  m_Vector.data(),
6016  m_Vector.data() + m_Vector.size(),
6017  pair,
6018  VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
6019  VmaVectorInsert(m_Vector, indexToInsert, pair);
6020 }
6021 
6022 template<typename KeyT, typename ValueT>
6023 VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
6024 {
6025  PairType* it = VmaBinaryFindFirstNotLess(
6026  m_Vector.data(),
6027  m_Vector.data() + m_Vector.size(),
6028  key,
6029  VmaPairFirstLess<KeyT, ValueT>());
6030  if((it != m_Vector.end()) && (it->first == key))
6031  {
6032  return it;
6033  }
6034  else
6035  {
6036  return m_Vector.end();
6037  }
6038 }
6039 
6040 template<typename KeyT, typename ValueT>
6041 void VmaMap<KeyT, ValueT>::erase(iterator it)
6042 {
6043  VmaVectorRemove(m_Vector, it - m_Vector.begin());
6044 }
6045 
6046 #endif // #if VMA_USE_STL_UNORDERED_MAP
6047 
6048 #endif // #if 0
6049 
6050 ////////////////////////////////////////////////////////////////////////////////
6051 
6052 class VmaDeviceMemoryBlock;
6053 
6054 enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE };
6055 
6056 struct VmaAllocation_T
6057 {
6058 private:
6059  static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80;
6060 
6061  enum FLAGS
6062  {
6063  FLAG_USER_DATA_STRING = 0x01,
6064  };
6065 
6066 public:
6067  enum ALLOCATION_TYPE
6068  {
6069  ALLOCATION_TYPE_NONE,
6070  ALLOCATION_TYPE_BLOCK,
6071  ALLOCATION_TYPE_DEDICATED,
6072  };
6073 
6074  /*
6075  This struct is allocated using VmaPoolAllocator.
6076  */
6077 
6078  VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) :
6079  m_Alignment{1},
6080  m_Size{0},
6081  m_pUserData{VMA_NULL},
6082  m_LastUseFrameIndex{currentFrameIndex},
6083  m_MemoryTypeIndex{0},
6084  m_Type{(uint8_t)ALLOCATION_TYPE_NONE},
6085  m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN},
6086  m_MapCount{0},
6087  m_Flags{userDataString ? (uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0}
6088  {
6089 #if VMA_STATS_STRING_ENABLED
6090  m_CreationFrameIndex = currentFrameIndex;
6091  m_BufferImageUsage = 0;
6092 #endif
6093  }
6094 
6095  ~VmaAllocation_T()
6096  {
6097  VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction.");
6098 
6099  // Check if owned string was freed.
6100  VMA_ASSERT(m_pUserData == VMA_NULL);
6101  }
6102 
6103  void InitBlockAllocation(
6104  VmaDeviceMemoryBlock* block,
6105  VkDeviceSize offset,
6106  VkDeviceSize alignment,
6107  VkDeviceSize size,
6108  uint32_t memoryTypeIndex,
6109  VmaSuballocationType suballocationType,
6110  bool mapped,
6111  bool canBecomeLost)
6112  {
6113  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6114  VMA_ASSERT(block != VMA_NULL);
6115  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6116  m_Alignment = alignment;
6117  m_Size = size;
6118  m_MemoryTypeIndex = memoryTypeIndex;
6119  m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6120  m_SuballocationType = (uint8_t)suballocationType;
6121  m_BlockAllocation.m_Block = block;
6122  m_BlockAllocation.m_Offset = offset;
6123  m_BlockAllocation.m_CanBecomeLost = canBecomeLost;
6124  }
6125 
6126  void InitLost()
6127  {
6128  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6129  VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST);
6130  m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
6131  m_MemoryTypeIndex = 0;
6132  m_BlockAllocation.m_Block = VMA_NULL;
6133  m_BlockAllocation.m_Offset = 0;
6134  m_BlockAllocation.m_CanBecomeLost = true;
6135  }
6136 
6137  void ChangeBlockAllocation(
6138  VmaAllocator hAllocator,
6139  VmaDeviceMemoryBlock* block,
6140  VkDeviceSize offset);
6141 
6142  void ChangeOffset(VkDeviceSize newOffset);
6143 
6144  // pMappedData not null means the allocation is created with the MAPPED flag.
6145  void InitDedicatedAllocation(
6146  uint32_t memoryTypeIndex,
6147  VkDeviceMemory hMemory,
6148  VmaSuballocationType suballocationType,
6149  void* pMappedData,
6150  VkDeviceSize size)
6151  {
6152  VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
6153  VMA_ASSERT(hMemory != VK_NULL_HANDLE);
6154  m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
6155  m_Alignment = 0;
6156  m_Size = size;
6157  m_MemoryTypeIndex = memoryTypeIndex;
6158  m_SuballocationType = (uint8_t)suballocationType;
6159  m_MapCount = (pMappedData != VMA_NULL) ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0;
6160  m_DedicatedAllocation.m_hMemory = hMemory;
6161  m_DedicatedAllocation.m_pMappedData = pMappedData;
6162  }
6163 
6164  ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
6165  VkDeviceSize GetAlignment() const { return m_Alignment; }
6166  VkDeviceSize GetSize() const { return m_Size; }
6167  bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; }
6168  void* GetUserData() const { return m_pUserData; }
6169  void SetUserData(VmaAllocator hAllocator, void* pUserData);
6170  VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
6171 
6172  VmaDeviceMemoryBlock* GetBlock() const
6173  {
6174  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
6175  return m_BlockAllocation.m_Block;
6176  }
6177  VkDeviceSize GetOffset() const;
6178  VkDeviceMemory GetMemory() const;
6179  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6180  bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; }
6181  void* GetMappedData() const;
6182  bool CanBecomeLost() const;
6183 
6184  uint32_t GetLastUseFrameIndex() const
6185  {
6186  return m_LastUseFrameIndex.load();
6187  }
6188  bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired)
6189  {
6190  return m_LastUseFrameIndex.compare_exchange_weak(expected, desired);
6191  }
6192  /*
6193  - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex,
6194  makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true.
6195  - Else, returns false.
6196 
6197  If hAllocation is already lost, this asserts - you should not call it then.
6198  If hAllocation was not created with CAN_BECOME_LOST_BIT, this asserts.
6199  */
6200  bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6201 
6202  void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo)
6203  {
6204  VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED);
6205  outInfo.blockCount = 1;
6206  outInfo.allocationCount = 1;
6207  outInfo.unusedRangeCount = 0;
6208  outInfo.usedBytes = m_Size;
6209  outInfo.unusedBytes = 0;
6210  outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size;
6211  outInfo.unusedRangeSizeMin = UINT64_MAX;
6212  outInfo.unusedRangeSizeMax = 0;
6213  }
6214 
6215  void BlockAllocMap();
6216  void BlockAllocUnmap();
6217  VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
6218  void DedicatedAllocUnmap(VmaAllocator hAllocator);
6219 
6220 #if VMA_STATS_STRING_ENABLED
6221  uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; }
6222  uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
6223 
6224  void InitBufferImageUsage(uint32_t bufferImageUsage)
6225  {
6226  VMA_ASSERT(m_BufferImageUsage == 0);
6227  m_BufferImageUsage = bufferImageUsage;
6228  }
6229 
6230  void PrintParameters(class VmaJsonWriter& json) const;
6231 #endif
6232 
6233 private:
6234  VkDeviceSize m_Alignment;
6235  VkDeviceSize m_Size;
6236  void* m_pUserData;
6237  VMA_ATOMIC_UINT32 m_LastUseFrameIndex;
6238  uint32_t m_MemoryTypeIndex;
6239  uint8_t m_Type; // ALLOCATION_TYPE
6240  uint8_t m_SuballocationType; // VmaSuballocationType
6241  // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT.
6242  // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory().
6243  uint8_t m_MapCount;
6244  uint8_t m_Flags; // enum FLAGS
6245 
6246  // Allocation out of VmaDeviceMemoryBlock.
6247  struct BlockAllocation
6248  {
6249  VmaDeviceMemoryBlock* m_Block;
6250  VkDeviceSize m_Offset;
6251  bool m_CanBecomeLost;
6252  };
6253 
6254  // Allocation for an object that has its own private VkDeviceMemory.
6255  struct DedicatedAllocation
6256  {
6257  VkDeviceMemory m_hMemory;
6258  void* m_pMappedData; // Not null means memory is mapped.
6259  };
6260 
6261  union
6262  {
6263  // Allocation out of VmaDeviceMemoryBlock.
6264  BlockAllocation m_BlockAllocation;
6265  // Allocation for an object that has its own private VkDeviceMemory.
6266  DedicatedAllocation m_DedicatedAllocation;
6267  };
6268 
6269 #if VMA_STATS_STRING_ENABLED
6270  uint32_t m_CreationFrameIndex;
6271  uint32_t m_BufferImageUsage; // 0 if unknown.
6272 #endif
6273 
6274  void FreeUserDataString(VmaAllocator hAllocator);
6275 };
6276 
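// Worked example of the m_MapCount encoding above: an allocation created with
// VMA_ALLOCATION_CREATE_MAPPED_BIT and then mapped twice via vmaMapMemory()
// stores 0x80 | 2 = 0x82, so IsPersistentMap() sees bit 0x80 set and the
// explicit map reference count is 0x82 & 0x7F = 2.
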
6277 /*
6278 Represents a region of VmaDeviceMemoryBlock that is either assigned to an
6279 allocation and returned as an allocated memory block, or free.
6280 */
6281 struct VmaSuballocation
6282 {
6283  VkDeviceSize offset;
6284  VkDeviceSize size;
6285  VmaAllocation hAllocation;
6286  VmaSuballocationType type;
6287 };
6288 
6289 // Comparator for offsets.
6290 struct VmaSuballocationOffsetLess
6291 {
6292  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6293  {
6294  return lhs.offset < rhs.offset;
6295  }
6296 };
6297 struct VmaSuballocationOffsetGreater
6298 {
6299  bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
6300  {
6301  return lhs.offset > rhs.offset;
6302  }
6303 };
6304 
6305 typedef VmaList< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > VmaSuballocationList;
6306 
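// Example (sketch): these comparators plug into the sorted-vector helpers
// defined earlier, e.g. to keep a hypothetical vector of suballocations
// ordered by ascending offset:
//
//     VmaVectorInsertSorted<VmaSuballocationOffsetLess>(suballocVector, suballoc);
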
6307 // Cost of one additional lost allocation, expressed as an equivalent number of bytes.
6308 static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576;
6309 
6310 enum class VmaAllocationRequestType
6311 {
6312  Normal,
6313  // Used by "Linear" algorithm.
6314  UpperAddress,
6315  EndOf1st,
6316  EndOf2nd,
6317 };
6318 
6319 /*
6320 Parameters of planned allocation inside a VmaDeviceMemoryBlock.
6321 
6322 If canMakeOtherLost was false:
6323 - item points to a FREE suballocation.
6324 - itemsToMakeLostCount is 0.
6325 
6326 If canMakeOtherLost was true:
6327 - item points to the first of a sequence of suballocations, which are either FREE,
6328  or point to VmaAllocations that can become lost.
6329 - itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for
6330  the requested allocation to succeed.
6331 */
6332 struct VmaAllocationRequest
6333 {
6334  VkDeviceSize offset;
6335  VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation.
6336  VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation.
6337  VmaSuballocationList::iterator item;
6338  size_t itemsToMakeLostCount;
6339  void* customData;
6340  VmaAllocationRequestType type;
6341 
6342  VkDeviceSize CalcCost() const
6343  {
6344  return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST;
6345  }
6346 };
6347 
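// Worked example of CalcCost(): a candidate location overlapping 256 KiB of
// existing allocations (sumItemSize = 262144) that requires making 2 of them
// lost costs 262144 + 2 * 1048576 = 2359296 equivalent bytes, so a candidate
// that sacrifices fewer allocations wins even if it overlaps somewhat more data.
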
6348 /*
6349 Data structure used for bookkeeping of allocations and unused ranges of memory
6350 in a single VkDeviceMemory block.
6351 */
6352 class VmaBlockMetadata
6353 {
6354 public:
6355  VmaBlockMetadata(VmaAllocator hAllocator);
6356  virtual ~VmaBlockMetadata() { }
6357  virtual void Init(VkDeviceSize size) { m_Size = size; }
6358 
6359  // Validates all data structures inside this object. If not valid, returns false.
6360  virtual bool Validate() const = 0;
6361  VkDeviceSize GetSize() const { return m_Size; }
6362  virtual size_t GetAllocationCount() const = 0;
6363  virtual VkDeviceSize GetSumFreeSize() const = 0;
6364  virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0;
6365  // Returns true if this block is empty - contains only a single free suballocation.
6366  virtual bool IsEmpty() const = 0;
6367 
6368  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0;
6369  // Shouldn't modify blockCount.
6370  virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0;
6371 
6372 #if VMA_STATS_STRING_ENABLED
6373  virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
6374 #endif
6375 
6376  // Tries to find a place for suballocation with given parameters inside this block.
6377  // If succeeded, fills pAllocationRequest and returns true.
6378  // If failed, returns false.
6379  virtual bool CreateAllocationRequest(
6380  uint32_t currentFrameIndex,
6381  uint32_t frameInUseCount,
6382  VkDeviceSize bufferImageGranularity,
6383  VkDeviceSize allocSize,
6384  VkDeviceSize allocAlignment,
6385  bool upperAddress,
6386  VmaSuballocationType allocType,
6387  bool canMakeOtherLost,
6388  // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
6389  uint32_t strategy,
6390  VmaAllocationRequest* pAllocationRequest) = 0;
6391 
6392  virtual bool MakeRequestedAllocationsLost(
6393  uint32_t currentFrameIndex,
6394  uint32_t frameInUseCount,
6395  VmaAllocationRequest* pAllocationRequest) = 0;
6396 
6397  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0;
6398 
6399  virtual VkResult CheckCorruption(const void* pBlockData) = 0;
6400 
6401  // Makes actual allocation based on request. Request must already be checked and valid.
6402  virtual void Alloc(
6403  const VmaAllocationRequest& request,
6404  VmaSuballocationType type,
6405  VkDeviceSize allocSize,
6406  VmaAllocation hAllocation) = 0;
6407 
6408  // Frees suballocation assigned to given memory region.
6409  virtual void Free(const VmaAllocation allocation) = 0;
6410  virtual void FreeAtOffset(VkDeviceSize offset) = 0;
6411 
6412 protected:
6413  const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
6414 
6415 #if VMA_STATS_STRING_ENABLED
6416  void PrintDetailedMap_Begin(class VmaJsonWriter& json,
6417  VkDeviceSize unusedBytes,
6418  size_t allocationCount,
6419  size_t unusedRangeCount) const;
6420  void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
6421  VkDeviceSize offset,
6422  VmaAllocation hAllocation) const;
6423  void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
6424  VkDeviceSize offset,
6425  VkDeviceSize size) const;
6426  void PrintDetailedMap_End(class VmaJsonWriter& json) const;
6427 #endif
6428 
6429 private:
6430  VkDeviceSize m_Size;
6431  const VkAllocationCallbacks* m_pAllocationCallbacks;
6432 };
6433 
6434 #define VMA_VALIDATE(cond) do { if(!(cond)) { \
6435  VMA_ASSERT(0 && "Validation failed: " #cond); \
6436  return false; \
6437  } } while(false)
6438 
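// Example (sketch): how a Validate() implementation uses VMA_VALIDATE - each
// failed condition asserts with its stringized text and makes the enclosing
// function return false. The function and its invariants below are hypothetical.
static bool VmaValidateExample(VkDeviceSize size, VkDeviceSize sumFreeSize, size_t allocationCount)
{
    VMA_VALIDATE(sumFreeSize <= size);
    VMA_VALIDATE(allocationCount > 0 || sumFreeSize == size);
    return true;
}
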
6439 class VmaBlockMetadata_Generic : public VmaBlockMetadata
6440 {
6441  VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic)
6442 public:
6443  VmaBlockMetadata_Generic(VmaAllocator hAllocator);
6444  virtual ~VmaBlockMetadata_Generic();
6445  virtual void Init(VkDeviceSize size);
6446 
6447  virtual bool Validate() const;
6448  virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; }
6449  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6450  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6451  virtual bool IsEmpty() const;
6452 
6453  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6454  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6455 
6456 #if VMA_STATS_STRING_ENABLED
6457  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6458 #endif
6459 
6460  virtual bool CreateAllocationRequest(
6461  uint32_t currentFrameIndex,
6462  uint32_t frameInUseCount,
6463  VkDeviceSize bufferImageGranularity,
6464  VkDeviceSize allocSize,
6465  VkDeviceSize allocAlignment,
6466  bool upperAddress,
6467  VmaSuballocationType allocType,
6468  bool canMakeOtherLost,
6469  uint32_t strategy,
6470  VmaAllocationRequest* pAllocationRequest);
6471 
6472  virtual bool MakeRequestedAllocationsLost(
6473  uint32_t currentFrameIndex,
6474  uint32_t frameInUseCount,
6475  VmaAllocationRequest* pAllocationRequest);
6476 
6477  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6478 
6479  virtual VkResult CheckCorruption(const void* pBlockData);
6480 
6481  virtual void Alloc(
6482  const VmaAllocationRequest& request,
6483  VmaSuballocationType type,
6484  VkDeviceSize allocSize,
6485  VmaAllocation hAllocation);
6486 
6487  virtual void Free(const VmaAllocation allocation);
6488  virtual void FreeAtOffset(VkDeviceSize offset);
6489 
6490 ////////////////////////////////////////////////////////////////////////////////
6491 // For defragmentation
6492 
6493  bool IsBufferImageGranularityConflictPossible(
6494  VkDeviceSize bufferImageGranularity,
6495  VmaSuballocationType& inOutPrevSuballocType) const;
6496 
6497 private:
6498  friend class VmaDefragmentationAlgorithm_Generic;
6499  friend class VmaDefragmentationAlgorithm_Fast;
6500 
6501  uint32_t m_FreeCount;
6502  VkDeviceSize m_SumFreeSize;
6503  VmaSuballocationList m_Suballocations;
6504  // Suballocations that are free and have size greater than certain threshold.
6505  // Sorted by size, ascending.
6506  VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize;
6507 
6508  bool ValidateFreeSuballocationList() const;
6509 
6510  // Checks if a suballocation with the given parameters can be placed at the given suballocItem.
6511  // If yes, fills pOffset and returns true. If no, returns false.
6512  bool CheckAllocation(
6513  uint32_t currentFrameIndex,
6514  uint32_t frameInUseCount,
6515  VkDeviceSize bufferImageGranularity,
6516  VkDeviceSize allocSize,
6517  VkDeviceSize allocAlignment,
6518  VmaSuballocationType allocType,
6519  VmaSuballocationList::const_iterator suballocItem,
6520  bool canMakeOtherLost,
6521  VkDeviceSize* pOffset,
6522  size_t* itemsToMakeLostCount,
6523  VkDeviceSize* pSumFreeSize,
6524  VkDeviceSize* pSumItemSize) const;
6525  // Merges the given free suballocation with the following one, which must also be free.
6526  void MergeFreeWithNext(VmaSuballocationList::iterator item);
6527  // Releases the given suballocation, making it free.
6528  // Merges it with adjacent free suballocations if applicable.
6529  // Returns an iterator to the new free suballocation at this place.
6530  VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
6531  // Inserts the given free suballocation into the sorted list
6532  // m_FreeSuballocationsBySize, if it is suitable.
6533  void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
6534  // Removes the given free suballocation from the sorted list
6535  // m_FreeSuballocationsBySize, if it is suitable.
6536  void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
6537 };
6538 
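// Note on the structure above: because m_FreeSuballocationsBySize is sorted by
// size ascending, CreateAllocationRequest can binary-search for the smallest
// free range that still fits the requested size (best-fit) instead of scanning
// every suballocation in the block.
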
6539 /*
6540 Allocations and their references in internal data structure look like this:
6541 
6542 if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
6543 
6544  0 +-------+
6545  | |
6546  | |
6547  | |
6548  +-------+
6549  | Alloc | 1st[m_1stNullItemsBeginCount]
6550  +-------+
6551  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6552  +-------+
6553  | ... |
6554  +-------+
6555  | Alloc | 1st[1st.size() - 1]
6556  +-------+
6557  | |
6558  | |
6559  | |
6560 GetSize() +-------+
6561 
6562 if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
6563 
6564  0 +-------+
6565  | Alloc | 2nd[0]
6566  +-------+
6567  | Alloc | 2nd[1]
6568  +-------+
6569  | ... |
6570  +-------+
6571  | Alloc | 2nd[2nd.size() - 1]
6572  +-------+
6573  | |
6574  | |
6575  | |
6576  +-------+
6577  | Alloc | 1st[m_1stNullItemsBeginCount]
6578  +-------+
6579  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6580  +-------+
6581  | ... |
6582  +-------+
6583  | Alloc | 1st[1st.size() - 1]
6584  +-------+
6585  | |
6586 GetSize() +-------+
6587 
6588 if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
6589 
6590  0 +-------+
6591  | |
6592  | |
6593  | |
6594  +-------+
6595  | Alloc | 1st[m_1stNullItemsBeginCount]
6596  +-------+
6597  | Alloc | 1st[m_1stNullItemsBeginCount + 1]
6598  +-------+
6599  | ... |
6600  +-------+
6601  | Alloc | 1st[1st.size() - 1]
6602  +-------+
6603  | |
6604  | |
6605  | |
6606  +-------+
6607  | Alloc | 2nd[2nd.size() - 1]
6608  +-------+
6609  | ... |
6610  +-------+
6611  | Alloc | 2nd[1]
6612  +-------+
6613  | Alloc | 2nd[0]
6614 GetSize() +-------+
6615 
6616 */
6617 class VmaBlockMetadata_Linear : public VmaBlockMetadata
6618 {
6619  VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear)
6620 public:
6621  VmaBlockMetadata_Linear(VmaAllocator hAllocator);
6622  virtual ~VmaBlockMetadata_Linear();
6623  virtual void Init(VkDeviceSize size);
6624 
6625  virtual bool Validate() const;
6626  virtual size_t GetAllocationCount() const;
6627  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; }
6628  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6629  virtual bool IsEmpty() const { return GetAllocationCount() == 0; }
6630 
6631  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6632  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6633 
6634 #if VMA_STATS_STRING_ENABLED
6635  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6636 #endif
6637 
6638  virtual bool CreateAllocationRequest(
6639  uint32_t currentFrameIndex,
6640  uint32_t frameInUseCount,
6641  VkDeviceSize bufferImageGranularity,
6642  VkDeviceSize allocSize,
6643  VkDeviceSize allocAlignment,
6644  bool upperAddress,
6645  VmaSuballocationType allocType,
6646  bool canMakeOtherLost,
6647  uint32_t strategy,
6648  VmaAllocationRequest* pAllocationRequest);
6649 
6650  virtual bool MakeRequestedAllocationsLost(
6651  uint32_t currentFrameIndex,
6652  uint32_t frameInUseCount,
6653  VmaAllocationRequest* pAllocationRequest);
6654 
6655  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6656 
6657  virtual VkResult CheckCorruption(const void* pBlockData);
6658 
6659  virtual void Alloc(
6660  const VmaAllocationRequest& request,
6661  VmaSuballocationType type,
6662  VkDeviceSize allocSize,
6663  VmaAllocation hAllocation);
6664 
6665  virtual void Free(const VmaAllocation allocation);
6666  virtual void FreeAtOffset(VkDeviceSize offset);
6667 
6668 private:
6669  /*
6670  There are two suballocation vectors, used in ping-pong way.
6671  The one with index m_1stVectorIndex is called 1st.
6672  The one with index (m_1stVectorIndex ^ 1) is called 2nd.
6673  2nd can be non-empty only when 1st is not empty.
6674  When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
6675  */
6676  typedef VmaVector< VmaSuballocation, VmaStlAllocator<VmaSuballocation> > SuballocationVectorType;
6677 
6678  enum SECOND_VECTOR_MODE
6679  {
6680  SECOND_VECTOR_EMPTY,
6681  /*
6682  Suballocations in the 2nd vector are created later than the ones in the 1st, but they
6683  all have smaller offsets.
6684  */
6685  SECOND_VECTOR_RING_BUFFER,
6686  /*
6687  Suballocations in 2nd vector are upper side of double stack.
6688  They all have offsets higher than those in 1st vector.
6689  Top of this stack means smaller offsets, but higher indices in this vector.
6690  */
6691  SECOND_VECTOR_DOUBLE_STACK,
6692  };
6693 
6694  VkDeviceSize m_SumFreeSize;
6695  SuballocationVectorType m_Suballocations0, m_Suballocations1;
6696  uint32_t m_1stVectorIndex;
6697  SECOND_VECTOR_MODE m_2ndVectorMode;
6698 
6699  SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6700  SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6701  const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
6702  const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
6703 
6704  // Number of items in 1st vector with hAllocation = null at the beginning.
6705  size_t m_1stNullItemsBeginCount;
6706  // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
6707  size_t m_1stNullItemsMiddleCount;
6708  // Number of items in 2nd vector with hAllocation = null.
6709  size_t m_2ndNullItemsCount;
6710 
6711  bool ShouldCompact1st() const;
6712  void CleanupAfterFree();
6713 
6714  bool CreateAllocationRequest_LowerAddress(
6715  uint32_t currentFrameIndex,
6716  uint32_t frameInUseCount,
6717  VkDeviceSize bufferImageGranularity,
6718  VkDeviceSize allocSize,
6719  VkDeviceSize allocAlignment,
6720  VmaSuballocationType allocType,
6721  bool canMakeOtherLost,
6722  uint32_t strategy,
6723  VmaAllocationRequest* pAllocationRequest);
6724  bool CreateAllocationRequest_UpperAddress(
6725  uint32_t currentFrameIndex,
6726  uint32_t frameInUseCount,
6727  VkDeviceSize bufferImageGranularity,
6728  VkDeviceSize allocSize,
6729  VkDeviceSize allocAlignment,
6730  VmaSuballocationType allocType,
6731  bool canMakeOtherLost,
6732  uint32_t strategy,
6733  VmaAllocationRequest* pAllocationRequest);
6734 };
6735 
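// Example (user-facing sketch): this linear metadata backs custom pools created
// with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT from the public API, useful for
// per-frame, stack-like, or ring-buffer allocation patterns. Parameter values
// below are illustrative only.
static VkResult VmaLinearPoolExample(VmaAllocator allocator, uint32_t memTypeIndex, VmaPool* pPool)
{
    VmaPoolCreateInfo poolCreateInfo = {};
    poolCreateInfo.memoryTypeIndex = memTypeIndex; // e.g. from vmaFindMemoryTypeIndex()
    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
    poolCreateInfo.blockSize = 64ull * 1024 * 1024; // one fixed 64 MiB block
    poolCreateInfo.maxBlockCount = 1;               // a linear pool uses a single block
    return vmaCreatePool(allocator, &poolCreateInfo, pPool);
}
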
6736 /*
6737 - GetSize() is the original size of the allocated memory block.
6738 - m_UsableSize is this size aligned down to a power of two.
6739  All allocations and calculations happen relative to m_UsableSize.
6740 - GetUnusableSize() is the difference between them.
6741  It is reported as a separate, unused range, not available for allocations.
6742 
6743 A node at level 0 has size = m_UsableSize.
6744 Each subsequent level contains nodes half the size of the previous level.
6745 m_LevelCount is the maximum number of levels to use in the current object.
6746 */
6747 class VmaBlockMetadata_Buddy : public VmaBlockMetadata
6748 {
6749  VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
6750 public:
6751  VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
6752  virtual ~VmaBlockMetadata_Buddy();
6753  virtual void Init(VkDeviceSize size);
6754 
6755  virtual bool Validate() const;
6756  virtual size_t GetAllocationCount() const { return m_AllocationCount; }
6757  virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
6758  virtual VkDeviceSize GetUnusedRangeSizeMax() const;
6759  virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
6760 
6761  virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
6762  virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
6763 
6764 #if VMA_STATS_STRING_ENABLED
6765  virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
6766 #endif
6767 
6768  virtual bool CreateAllocationRequest(
6769  uint32_t currentFrameIndex,
6770  uint32_t frameInUseCount,
6771  VkDeviceSize bufferImageGranularity,
6772  VkDeviceSize allocSize,
6773  VkDeviceSize allocAlignment,
6774  bool upperAddress,
6775  VmaSuballocationType allocType,
6776  bool canMakeOtherLost,
6777  uint32_t strategy,
6778  VmaAllocationRequest* pAllocationRequest);
6779 
6780  virtual bool MakeRequestedAllocationsLost(
6781  uint32_t currentFrameIndex,
6782  uint32_t frameInUseCount,
6783  VmaAllocationRequest* pAllocationRequest);
6784 
6785  virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
6786 
6787  virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
6788 
6789  virtual void Alloc(
6790  const VmaAllocationRequest& request,
6791  VmaSuballocationType type,
6792  VkDeviceSize allocSize,
6793  VmaAllocation hAllocation);
6794 
6795  virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
6796  virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
6797 
6798 private:
6799  static const VkDeviceSize MIN_NODE_SIZE = 32;
6800  static const size_t MAX_LEVELS = 30;
6801 
6802  struct ValidationContext
6803  {
6804  size_t calculatedAllocationCount;
6805  size_t calculatedFreeCount;
6806  VkDeviceSize calculatedSumFreeSize;
6807 
6808  ValidationContext() :
6809  calculatedAllocationCount(0),
6810  calculatedFreeCount(0),
6811  calculatedSumFreeSize(0) { }
6812  };
6813 
6814  struct Node
6815  {
6816  VkDeviceSize offset;
6817  enum TYPE
6818  {
6819  TYPE_FREE,
6820  TYPE_ALLOCATION,
6821  TYPE_SPLIT,
6822  TYPE_COUNT
6823  } type;
6824  Node* parent;
6825  Node* buddy;
6826 
6827  union
6828  {
6829  struct
6830  {
6831  Node* prev;
6832  Node* next;
6833  } free;
6834  struct
6835  {
6836  VmaAllocation alloc;
6837  } allocation;
6838  struct
6839  {
6840  Node* leftChild;
6841  } split;
6842  };
6843  };
6844 
6845  // Size of the memory block aligned down to a power of two.
6846  VkDeviceSize m_UsableSize;
6847  uint32_t m_LevelCount;
6848 
6849  Node* m_Root;
6850  struct {
6851  Node* front;
6852  Node* back;
6853  } m_FreeList[MAX_LEVELS];
6854  // Number of nodes in the tree with type == TYPE_ALLOCATION.
6855  size_t m_AllocationCount;
6856  // Number of nodes in the tree with type == TYPE_FREE.
6857  size_t m_FreeCount;
6858  // This includes space wasted due to internal fragmentation. Doesn't include unusable size.
6859  VkDeviceSize m_SumFreeSize;
6860 
6861  VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
6862  void DeleteNode(Node* node);
6863  bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
6864  uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
6865  inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
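 /*
 A minimal sketch of the level computation declared above as AllocSizeToLevel()
 (assumed behavior, consistent with LevelToNodeSize(level) == m_UsableSize >> level;
 the real body may differ):

     uint32_t level = 0;
     VkDeviceSize currLevelNodeSize = m_UsableSize;
     while(level + 1 < m_LevelCount && (currLevelNodeSize >> 1) >= allocSize)
     {
         currLevelNodeSize >>= 1; // Each deeper level halves the node size.
         ++level;
     }
     // 'level' now addresses the smallest node that still fits allocSize.
 */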
6866  // Alloc passed just for validation. Can be null.
6867  void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset);
6868  void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const;
6869  // Adds node to the front of FreeList at given level.
6870  // node->type must be FREE.
6871  // node->free.prev, next can be undefined.
6872  void AddToFreeListFront(uint32_t level, Node* node);
6873  // Removes node from FreeList at given level.
6874  // node->type must be FREE.
6875  // node->free.prev, next stay untouched.
6876  void RemoveFromFreeList(uint32_t level, Node* node);
6877 
6878 #if VMA_STATS_STRING_ENABLED
6879  void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
6880 #endif
6881 };
6882 
6883 /*
6884 Represents a single block of device memory (`VkDeviceMemory`) with all the
6885 data about its regions (aka suballocations, #VmaAllocation), both assigned and free.
6886 
6887 Thread-safety: This class must be externally synchronized.
6888 */
6889 class VmaDeviceMemoryBlock
6890 {
6891  VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock)
6892 public:
6893  VmaBlockMetadata* m_pMetadata;
6894 
6895  VmaDeviceMemoryBlock(VmaAllocator hAllocator);
6896 
6897  ~VmaDeviceMemoryBlock()
6898  {
6899  VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
6900  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
6901  }
6902 
6903  // Always call after construction.
6904  void Init(
6905  VmaAllocator hAllocator,
6906  VmaPool hParentPool,
6907  uint32_t newMemoryTypeIndex,
6908  VkDeviceMemory newMemory,
6909  VkDeviceSize newSize,
6910  uint32_t id,
6911  uint32_t algorithm);
6912  // Always call before destruction.
6913  void Destroy(VmaAllocator allocator);
6914 
6915  VmaPool GetParentPool() const { return m_hParentPool; }
6916  VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
6917  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
6918  uint32_t GetId() const { return m_Id; }
6919  void* GetMappedData() const { return m_pMappedData; }
6920 
6921  // Validates all data structures inside this object. If not valid, returns false.
6922  bool Validate() const;
6923 
6924  VkResult CheckCorruption(VmaAllocator hAllocator);
6925 
6926  // ppData can be null.
6927  VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
6928  void Unmap(VmaAllocator hAllocator, uint32_t count);
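 /*
 Map() and Unmap() are reference-counted through m_MapCount, so that only the
 first Map() actually calls vkMapMemory and only the matching last Unmap()
 calls vkUnmapMemory. A hypothetical usage sketch (pBlock is an illustrative
 pointer to this class):

     void* pData = VMA_NULL;
     if(pBlock->Map(hAllocator, 1, &pData) == VK_SUCCESS)
     {
         // ... read or write through pData ...
         pBlock->Unmap(hAllocator, 1); // Balanced with the Map() above.
     }
 */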
6929 
6930  VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6931  VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
6932 
6933  VkResult BindBufferMemory(
6934  const VmaAllocator hAllocator,
6935  const VmaAllocation hAllocation,
6936  VkDeviceSize allocationLocalOffset,
6937  VkBuffer hBuffer,
6938  const void* pNext);
6939  VkResult BindImageMemory(
6940  const VmaAllocator hAllocator,
6941  const VmaAllocation hAllocation,
6942  VkDeviceSize allocationLocalOffset,
6943  VkImage hImage,
6944  const void* pNext);
6945 
6946 private:
6947  VmaPool m_hParentPool; // VK_NULL_HANDLE if this block doesn't belong to a custom pool.
6948  uint32_t m_MemoryTypeIndex;
6949  uint32_t m_Id;
6950  VkDeviceMemory m_hMemory;
6951 
6952  /*
6953  Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
6954  Also protects m_MapCount, m_pMappedData.
6955  Allocations, deallocations, and any change in m_pMetadata are protected by the parent's VmaBlockVector::m_Mutex.
6956  */
6957  VMA_MUTEX m_Mutex;
6958  uint32_t m_MapCount;
6959  void* m_pMappedData;
6960 };
6961 
6962 struct VmaPointerLess
6963 {
6964  bool operator()(const void* lhs, const void* rhs) const
6965  {
6966  return lhs < rhs;
6967  }
6968 };
6969 
6970 struct VmaDefragmentationMove
6971 {
6972  size_t srcBlockIndex;
6973  size_t dstBlockIndex;
6974  VkDeviceSize srcOffset;
6975  VkDeviceSize dstOffset;
6976  VkDeviceSize size;
6977  VmaAllocation hAllocation;
6978  VmaDeviceMemoryBlock* pSrcBlock;
6979  VmaDeviceMemoryBlock* pDstBlock;
6980 };
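/*
A VmaDefragmentationMove is only a request - whoever processes it performs the
actual copy. A minimal CPU-side sketch, assuming pSrcMappedData and
pDstMappedData are hypothetical pointers to the mapped source and destination
blocks:

    memmove(
        (char*)pDstMappedData + move.dstOffset,
        (char*)pSrcMappedData + move.srcOffset,
        (size_t)move.size); // memmove because ranges may overlap within one block.
*/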
6981 
6982 class VmaDefragmentationAlgorithm;
6983 
6984 /*
6985 Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
6986 Vulkan memory type.
6987 
6988 Synchronized internally with a mutex.
6989 */
6990 struct VmaBlockVector
6991 {
6992  VMA_CLASS_NO_COPY(VmaBlockVector)
6993 public:
6994  VmaBlockVector(
6995  VmaAllocator hAllocator,
6996  VmaPool hParentPool,
6997  uint32_t memoryTypeIndex,
6998  VkDeviceSize preferredBlockSize,
6999  size_t minBlockCount,
7000  size_t maxBlockCount,
7001  VkDeviceSize bufferImageGranularity,
7002  uint32_t frameInUseCount,
7003  bool explicitBlockSize,
7004  uint32_t algorithm);
7005  ~VmaBlockVector();
7006 
7007  VkResult CreateMinBlocks();
7008 
7009  VmaAllocator GetAllocator() const { return m_hAllocator; }
7010  VmaPool GetParentPool() const { return m_hParentPool; }
7011  bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
7012  uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
7013  VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
7014  VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
7015  uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; }
7016  uint32_t GetAlgorithm() const { return m_Algorithm; }
7017 
7018  void GetPoolStats(VmaPoolStats* pStats);
7019 
7020  bool IsEmpty();
7021  bool IsCorruptionDetectionEnabled() const;
7022 
7023  VkResult Allocate(
7024  uint32_t currentFrameIndex,
7025  VkDeviceSize size,
7026  VkDeviceSize alignment,
7027  const VmaAllocationCreateInfo& createInfo,
7028  VmaSuballocationType suballocType,
7029  size_t allocationCount,
7030  VmaAllocation* pAllocations);
7031 
7032  void Free(const VmaAllocation hAllocation);
7033 
7034  // Adds statistics of this BlockVector to pStats.
7035  void AddStats(VmaStats* pStats);
7036 
7037 #if VMA_STATS_STRING_ENABLED
7038  void PrintDetailedMap(class VmaJsonWriter& json);
7039 #endif
7040 
7041  void MakePoolAllocationsLost(
7042  uint32_t currentFrameIndex,
7043  size_t* pLostAllocationCount);
7044  VkResult CheckCorruption();
7045 
7046  // Saves results in pCtx->res.
7047  void Defragment(
7048  class VmaBlockVectorDefragmentationContext* pCtx,
7049  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
7050  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
7051  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
7052  VkCommandBuffer commandBuffer);
7053  void DefragmentationEnd(
7054  class VmaBlockVectorDefragmentationContext* pCtx,
7055  uint32_t flags,
7056  VmaDefragmentationStats* pStats);
7057 
7058  uint32_t ProcessDefragmentations(
7059  class VmaBlockVectorDefragmentationContext *pCtx,
7060  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves);
7061 
7062  void CommitDefragmentations(
7063  class VmaBlockVectorDefragmentationContext *pCtx,
7064  VmaDefragmentationStats* pStats);
7065 
7066  ////////////////////////////////////////////////////////////////////////////////
7067  // To be used only while the m_Mutex is locked. Used during defragmentation.
7068 
7069  size_t GetBlockCount() const { return m_Blocks.size(); }
7070  VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
7071  size_t CalcAllocationCount() const;
7072  bool IsBufferImageGranularityConflictPossible() const;
7073 
7074 private:
7075  friend class VmaDefragmentationAlgorithm_Generic;
7076 
7077  const VmaAllocator m_hAllocator;
7078  const VmaPool m_hParentPool;
7079  const uint32_t m_MemoryTypeIndex;
7080  const VkDeviceSize m_PreferredBlockSize;
7081  const size_t m_MinBlockCount;
7082  const size_t m_MaxBlockCount;
7083  const VkDeviceSize m_BufferImageGranularity;
7084  const uint32_t m_FrameInUseCount;
7085  const bool m_ExplicitBlockSize;
7086  const uint32_t m_Algorithm;
7087  VMA_RW_MUTEX m_Mutex;
7088 
7089  /* There can be at most one memory block (VkDeviceMemory) that is completely empty (except when minBlockCount > 0) -
7090  a hysteresis that avoids the pessimistic case of alternating creation and destruction of a VkDeviceMemory. */
7091  bool m_HasEmptyBlock;
7092  // Incrementally sorted by sumFreeSize, ascending.
7093  VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*> > m_Blocks;
7094  uint32_t m_NextBlockId;
7095 
7096  VkDeviceSize CalcMaxBlockSize() const;
7097 
7098  // Finds and removes given block from vector.
7099  void Remove(VmaDeviceMemoryBlock* pBlock);
7100 
7101  // Performs a single step in sorting m_Blocks. They may not be fully sorted
7102  // after this call (see the sketch below).
7103  void IncrementallySortBlocks();
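 /*
 A minimal sketch of such a step (assumed behavior, not necessarily the exact
 body): one pass that fixes at most a single inversion in the ordering by
 sumFreeSize, so repeated calls converge to a fully sorted vector.

     for(size_t i = 1; i < m_Blocks.size(); ++i)
     {
         if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() >
             m_Blocks[i]->m_pMetadata->GetSumFreeSize())
         {
             VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
             return;
         }
     }
 */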
7104 
7105  VkResult AllocatePage(
7106  uint32_t currentFrameIndex,
7107  VkDeviceSize size,
7108  VkDeviceSize alignment,
7109  const VmaAllocationCreateInfo& createInfo,
7110  VmaSuballocationType suballocType,
7111  VmaAllocation* pAllocation);
7112 
7113  // To be used only without CAN_MAKE_OTHER_LOST flag.
7114  VkResult AllocateFromBlock(
7115  VmaDeviceMemoryBlock* pBlock,
7116  uint32_t currentFrameIndex,
7117  VkDeviceSize size,
7118  VkDeviceSize alignment,
7119  VmaAllocationCreateFlags allocFlags,
7120  void* pUserData,
7121  VmaSuballocationType suballocType,
7122  uint32_t strategy,
7123  VmaAllocation* pAllocation);
7124 
7125  VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
7126 
7127  // Saves result to pCtx->res.
7128  void ApplyDefragmentationMovesCpu(
7129  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7130  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves);
7131  // Saves result to pCtx->res.
7132  void ApplyDefragmentationMovesGpu(
7133  class VmaBlockVectorDefragmentationContext* pDefragCtx,
7134  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7135  VkCommandBuffer commandBuffer);
7136 
7137  /*
7138  Used during defragmentation. pDefragmentationStats is optional: if not null,
7139  it is treated as in/out and updated with new data.
7140  */
7141  void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats);
7142 
7143  void UpdateHasEmptyBlock();
7144 };
7145 
7146 struct VmaPool_T
7147 {
7148  VMA_CLASS_NO_COPY(VmaPool_T)
7149 public:
7150  VmaBlockVector m_BlockVector;
7151 
7152  VmaPool_T(
7153  VmaAllocator hAllocator,
7154  const VmaPoolCreateInfo& createInfo,
7155  VkDeviceSize preferredBlockSize);
7156  ~VmaPool_T();
7157 
7158  uint32_t GetId() const { return m_Id; }
7159  void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
7160 
7161  const char* GetName() const { return m_Name; }
7162  void SetName(const char* pName);
7163 
7164 #if VMA_STATS_STRING_ENABLED
7165  //void PrintDetailedMap(class VmaStringBuilder& sb);
7166 #endif
7167 
7168 private:
7169  uint32_t m_Id;
7170  char* m_Name;
7171 };
7172 
7173 /*
7174 Performs defragmentation:
7175 
7176 - Updates `pBlockVector->m_pMetadata`.
7177 - Updates allocations by calling ChangeBlockAllocation() or ChangeOffset().
7178 - Does not move actual data, only returns requested moves as `moves`.
7179 */
7180 class VmaDefragmentationAlgorithm
7181 {
7182  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm)
7183 public:
7184  VmaDefragmentationAlgorithm(
7185  VmaAllocator hAllocator,
7186  VmaBlockVector* pBlockVector,
7187  uint32_t currentFrameIndex) :
7188  m_hAllocator(hAllocator),
7189  m_pBlockVector(pBlockVector),
7190  m_CurrentFrameIndex(currentFrameIndex)
7191  {
7192  }
7193  virtual ~VmaDefragmentationAlgorithm()
7194  {
7195  }
7196 
7197  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0;
7198  virtual void AddAll() = 0;
7199 
7200  virtual VkResult Defragment(
7201  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7202  VkDeviceSize maxBytesToMove,
7203  uint32_t maxAllocationsToMove,
7204  VmaDefragmentationFlags flags) = 0;
7205 
7206  virtual VkDeviceSize GetBytesMoved() const = 0;
7207  virtual uint32_t GetAllocationsMoved() const = 0;
7208 
7209 protected:
7210  VmaAllocator const m_hAllocator;
7211  VmaBlockVector* const m_pBlockVector;
7212  const uint32_t m_CurrentFrameIndex;
7213 
7214  struct AllocationInfo
7215  {
7216  VmaAllocation m_hAllocation;
7217  VkBool32* m_pChanged;
7218 
7219  AllocationInfo() :
7220  m_hAllocation(VK_NULL_HANDLE),
7221  m_pChanged(VMA_NULL)
7222  {
7223  }
7224  AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) :
7225  m_hAllocation(hAlloc),
7226  m_pChanged(pChanged)
7227  {
7228  }
7229  };
7230 };
7231 
7232 class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm
7233 {
7234  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic)
7235 public:
7236  VmaDefragmentationAlgorithm_Generic(
7237  VmaAllocator hAllocator,
7238  VmaBlockVector* pBlockVector,
7239  uint32_t currentFrameIndex,
7240  bool overlappingMoveSupported);
7241  virtual ~VmaDefragmentationAlgorithm_Generic();
7242 
7243  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7244  virtual void AddAll() { m_AllAllocations = true; }
7245 
7246  virtual VkResult Defragment(
7247  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7248  VkDeviceSize maxBytesToMove,
7249  uint32_t maxAllocationsToMove,
7250  VmaDefragmentationFlags flags);
7251 
7252  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7253  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7254 
7255 private:
7256  uint32_t m_AllocationCount;
7257  bool m_AllAllocations;
7258 
7259  VkDeviceSize m_BytesMoved;
7260  uint32_t m_AllocationsMoved;
7261 
7262  struct AllocationInfoSizeGreater
7263  {
7264  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7265  {
7266  return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize();
7267  }
7268  };
7269 
7270  struct AllocationInfoOffsetGreater
7271  {
7272  bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const
7273  {
7274  return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset();
7275  }
7276  };
7277 
7278  struct BlockInfo
7279  {
7280  size_t m_OriginalBlockIndex;
7281  VmaDeviceMemoryBlock* m_pBlock;
7282  bool m_HasNonMovableAllocations;
7283  VmaVector< AllocationInfo, VmaStlAllocator<AllocationInfo> > m_Allocations;
7284 
7285  BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) :
7286  m_OriginalBlockIndex(SIZE_MAX),
7287  m_pBlock(VMA_NULL),
7288  m_HasNonMovableAllocations(true),
7289  m_Allocations(pAllocationCallbacks)
7290  {
7291  }
7292 
7293  void CalcHasNonMovableAllocations()
7294  {
7295  const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount();
7296  const size_t defragmentAllocCount = m_Allocations.size();
7297  m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount;
7298  }
7299 
7300  void SortAllocationsBySizeDescending()
7301  {
7302  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater());
7303  }
7304 
7305  void SortAllocationsByOffsetDescending()
7306  {
7307  VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater());
7308  }
7309  };
7310 
7311  struct BlockPointerLess
7312  {
7313  bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const
7314  {
7315  return pLhsBlockInfo->m_pBlock < pRhsBlock;
7316  }
7317  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7318  {
7319  return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock;
7320  }
7321  };
7322 
7323  // 1. Blocks with some non-movable allocations go first.
7324  // 2. Blocks with smaller sumFreeSize go first.
7325  struct BlockInfoCompareMoveDestination
7326  {
7327  bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const
7328  {
7329  if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations)
7330  {
7331  return true;
7332  }
7333  if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations)
7334  {
7335  return false;
7336  }
7337  if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize())
7338  {
7339  return true;
7340  }
7341  return false;
7342  }
7343  };
7344 
7345  typedef VmaVector< BlockInfo*, VmaStlAllocator<BlockInfo*> > BlockInfoVector;
7346  BlockInfoVector m_Blocks;
7347 
7348  VkResult DefragmentRound(
7349  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7350  VkDeviceSize maxBytesToMove,
7351  uint32_t maxAllocationsToMove,
7352  bool freeOldAllocations);
7353 
7354  size_t CalcBlocksWithNonMovableCount() const;
7355 
7356  static bool MoveMakesSense(
7357  size_t dstBlockIndex, VkDeviceSize dstOffset,
7358  size_t srcBlockIndex, VkDeviceSize srcOffset);
7359 };
7360 
7361 class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm
7362 {
7363  VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast)
7364 public:
7365  VmaDefragmentationAlgorithm_Fast(
7366  VmaAllocator hAllocator,
7367  VmaBlockVector* pBlockVector,
7368  uint32_t currentFrameIndex,
7369  bool overlappingMoveSupported);
7370  virtual ~VmaDefragmentationAlgorithm_Fast();
7371 
7372  virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; }
7373  virtual void AddAll() { m_AllAllocations = true; }
7374 
7375  virtual VkResult Defragment(
7376  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
7377  VkDeviceSize maxBytesToMove,
7378  uint32_t maxAllocationsToMove,
7379  VmaDefragmentationFlags flags);
7380 
7381  virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; }
7382  virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; }
7383 
7384 private:
7385  struct BlockInfo
7386  {
7387  size_t origBlockIndex;
7388  };
7389 
7390  class FreeSpaceDatabase
7391  {
7392  public:
7393  FreeSpaceDatabase()
7394  {
7395  FreeSpace s = {};
7396  s.blockInfoIndex = SIZE_MAX;
7397  for(size_t i = 0; i < MAX_COUNT; ++i)
7398  {
7399  m_FreeSpaces[i] = s;
7400  }
7401  }
7402 
7403  void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size)
7404  {
7405  if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7406  {
7407  return;
7408  }
7409 
7410  // Find the first unused slot, or else the smallest registered structure smaller than the new one.
7411  size_t bestIndex = SIZE_MAX;
7412  for(size_t i = 0; i < MAX_COUNT; ++i)
7413  {
7414  // Empty structure.
7415  if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
7416  {
7417  bestIndex = i;
7418  break;
7419  }
7420  if(m_FreeSpaces[i].size < size &&
7421  (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
7422  {
7423  bestIndex = i;
7424  }
7425  }
7426 
7427  if(bestIndex != SIZE_MAX)
7428  {
7429  m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
7430  m_FreeSpaces[bestIndex].offset = offset;
7431  m_FreeSpaces[bestIndex].size = size;
7432  }
7433  }
7434 
7435  bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
7436  size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
7437  {
7438  size_t bestIndex = SIZE_MAX;
7439  VkDeviceSize bestFreeSpaceAfter = 0;
7440  for(size_t i = 0; i < MAX_COUNT; ++i)
7441  {
7442  // Structure is valid.
7443  if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
7444  {
7445  const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
7446  // Allocation fits into this structure.
7447  if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
7448  {
7449  const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
7450  (dstOffset + size);
7451  if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
7452  {
7453  bestIndex = i;
7454  bestFreeSpaceAfter = freeSpaceAfter;
7455  }
7456  }
7457  }
7458  }
7459 
7460  if(bestIndex != SIZE_MAX)
7461  {
7462  outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
7463  outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
7464 
7465  if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
7466  {
7467  // Keep this structure for the remaining empty space.
7468  const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
7469  m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
7470  m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
7471  }
7472  else
7473  {
7474  // This structure becomes invalid.
7475  m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
7476  }
7477 
7478  return true;
7479  }
7480 
7481  return false;
7482  }
7483 
7484  private:
7485  static const size_t MAX_COUNT = 4;
7486 
7487  struct FreeSpace
7488  {
7489  size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
7490  VkDeviceSize offset;
7491  VkDeviceSize size;
7492  } m_FreeSpaces[MAX_COUNT];
7493  };
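 /*
 A hypothetical round-trip through the database above (offsets and sizes are
 illustrative; 1024 is assumed to exceed VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER):

     FreeSpaceDatabase db;
     db.Register(0, 256, 1024); // Block 0 has 1024 bytes free at offset 256.
     size_t blockInfoIndex;
     VkDeviceSize dstOffset;
     if(db.Fetch(64, 512, blockInfoIndex, dstOffset))
     {
         // blockInfoIndex == 0, dstOffset == 256 (already 64-aligned).
         // The remaining 512-byte tail stays registered because it is still
         // large enough to be useful.
     }
 */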
7494 
7495  const bool m_OverlappingMoveSupported;
7496 
7497  uint32_t m_AllocationCount;
7498  bool m_AllAllocations;
7499 
7500  VkDeviceSize m_BytesMoved;
7501  uint32_t m_AllocationsMoved;
7502 
7503  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
7504 
7505  void PreprocessMetadata();
7506  void PostprocessMetadata();
7507  void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
7508 };
7509 
7510 struct VmaBlockDefragmentationContext
7511 {
7512  enum BLOCK_FLAG
7513  {
7514  BLOCK_FLAG_USED = 0x00000001,
7515  };
7516  uint32_t flags;
7517  VkBuffer hBuffer;
7518 };
7519 
7520 class VmaBlockVectorDefragmentationContext
7521 {
7522  VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
7523 public:
7524  VkResult res;
7525  bool mutexLocked;
7526  VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
7527  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
7528  uint32_t defragmentationMovesProcessed;
7529  uint32_t defragmentationMovesCommitted;
7530  bool hasDefragmentationPlan;
7531 
7532  VmaBlockVectorDefragmentationContext(
7533  VmaAllocator hAllocator,
7534  VmaPool hCustomPool, // Optional.
7535  VmaBlockVector* pBlockVector,
7536  uint32_t currFrameIndex);
7537  ~VmaBlockVectorDefragmentationContext();
7538 
7539  VmaPool GetCustomPool() const { return m_hCustomPool; }
7540  VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
7541  VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
7542 
7543  void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
7544  void AddAll() { m_AllAllocations = true; }
7545 
7546  void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
7547 
7548 private:
7549  const VmaAllocator m_hAllocator;
7550  // Null if not from custom pool.
7551  const VmaPool m_hCustomPool;
7552  // Redundant, kept for convenience so it doesn't have to be fetched from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
7553  VmaBlockVector* const m_pBlockVector;
7554  const uint32_t m_CurrFrameIndex;
7555  // Owner of this object.
7556  VmaDefragmentationAlgorithm* m_pAlgorithm;
7557 
7558  struct AllocInfo
7559  {
7560  VmaAllocation hAlloc;
7561  VkBool32* pChanged;
7562  };
7563  // Used between constructor and Begin.
7564  VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
7565  bool m_AllAllocations;
7566 };
7567 
7568 struct VmaDefragmentationContext_T
7569 {
7570 private:
7571  VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
7572 public:
7573  VmaDefragmentationContext_T(
7574  VmaAllocator hAllocator,
7575  uint32_t currFrameIndex,
7576  uint32_t flags,
7577  VmaDefragmentationStats* pStats);
7578  ~VmaDefragmentationContext_T();
7579 
7580  void AddPools(uint32_t poolCount, const VmaPool* pPools);
7581  void AddAllocations(
7582  uint32_t allocationCount,
7583  const VmaAllocation* pAllocations,
7584  VkBool32* pAllocationsChanged);
7585 
7586  /*
7587  Returns:
7588  - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
7589  - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
7590  - Negative value if an error occurred and the object can be destroyed immediately.
7591  */
7592  VkResult Defragment(
7593  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
7594  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
7595  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
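 /*
 A hypothetical caller-side view of the contract documented above (variable
 names are illustrative):

     VkResult res = pCtx->Defragment(maxCpuBytes, maxCpuAllocs,
         maxGpuBytes, maxGpuAllocs, commandBuffer, pStats, flags);
     if(res == VK_NOT_READY)
     {
         // Copy commands have been recorded into commandBuffer: submit it,
         // wait for completion, then end defragmentation before destroying
         // the context.
     }
     else
     {
         // VK_SUCCESS or a negative error code: destroy the context right away.
     }
 */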
7596 
7597  VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
7598  VkResult DefragmentPassEnd();
7599 
7600 private:
7601  const VmaAllocator m_hAllocator;
7602  const uint32_t m_CurrFrameIndex;
7603  const uint32_t m_Flags;
7604  VmaDefragmentationStats* const m_pStats;
7605 
7606  VkDeviceSize m_MaxCpuBytesToMove;
7607  uint32_t m_MaxCpuAllocationsToMove;
7608  VkDeviceSize m_MaxGpuBytesToMove;
7609  uint32_t m_MaxGpuAllocationsToMove;
7610 
7611  // Owner of these objects.
7612  VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
7613  // Owner of these objects.
7614  VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
7615 };
7616 
7617 #if VMA_RECORDING_ENABLED
7618 
7619 class VmaRecorder
7620 {
7621 public:
7622  VmaRecorder();
7623  VkResult Init(const VmaRecordSettings& settings, bool useMutex);
7624  void WriteConfiguration(
7625  const VkPhysicalDeviceProperties& devProps,
7626  const VkPhysicalDeviceMemoryProperties& memProps,
7627  uint32_t vulkanApiVersion,
7628  bool dedicatedAllocationExtensionEnabled,
7629  bool bindMemory2ExtensionEnabled,
7630  bool memoryBudgetExtensionEnabled,
7631  bool deviceCoherentMemoryExtensionEnabled);
7632  ~VmaRecorder();
7633 
7634  void RecordCreateAllocator(uint32_t frameIndex);
7635  void RecordDestroyAllocator(uint32_t frameIndex);
7636  void RecordCreatePool(uint32_t frameIndex,
7637  const VmaPoolCreateInfo& createInfo,
7638  VmaPool pool);
7639  void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
7640  void RecordAllocateMemory(uint32_t frameIndex,
7641  const VkMemoryRequirements& vkMemReq,
7642  const VmaAllocationCreateInfo& createInfo,
7643  VmaAllocation allocation);
7644  void RecordAllocateMemoryPages(uint32_t frameIndex,
7645  const VkMemoryRequirements& vkMemReq,
7646  const VmaAllocationCreateInfo& createInfo,
7647  uint64_t allocationCount,
7648  const VmaAllocation* pAllocations);
7649  void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
7650  const VkMemoryRequirements& vkMemReq,
7651  bool requiresDedicatedAllocation,
7652  bool prefersDedicatedAllocation,
7653  const VmaAllocationCreateInfo& createInfo,
7654  VmaAllocation allocation);
7655  void RecordAllocateMemoryForImage(uint32_t frameIndex,
7656  const VkMemoryRequirements& vkMemReq,
7657  bool requiresDedicatedAllocation,
7658  bool prefersDedicatedAllocation,
7659  const VmaAllocationCreateInfo& createInfo,
7660  VmaAllocation allocation);
7661  void RecordFreeMemory(uint32_t frameIndex,
7662  VmaAllocation allocation);
7663  void RecordFreeMemoryPages(uint32_t frameIndex,
7664  uint64_t allocationCount,
7665  const VmaAllocation* pAllocations);
7666  void RecordSetAllocationUserData(uint32_t frameIndex,
7667  VmaAllocation allocation,
7668  const void* pUserData);
7669  void RecordCreateLostAllocation(uint32_t frameIndex,
7670  VmaAllocation allocation);
7671  void RecordMapMemory(uint32_t frameIndex,
7672  VmaAllocation allocation);
7673  void RecordUnmapMemory(uint32_t frameIndex,
7674  VmaAllocation allocation);
7675  void RecordFlushAllocation(uint32_t frameIndex,
7676  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7677  void RecordInvalidateAllocation(uint32_t frameIndex,
7678  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
7679  void RecordCreateBuffer(uint32_t frameIndex,
7680  const VkBufferCreateInfo& bufCreateInfo,
7681  const VmaAllocationCreateInfo& allocCreateInfo,
7682  VmaAllocation allocation);
7683  void RecordCreateImage(uint32_t frameIndex,
7684  const VkImageCreateInfo& imageCreateInfo,
7685  const VmaAllocationCreateInfo& allocCreateInfo,
7686  VmaAllocation allocation);
7687  void RecordDestroyBuffer(uint32_t frameIndex,
7688  VmaAllocation allocation);
7689  void RecordDestroyImage(uint32_t frameIndex,
7690  VmaAllocation allocation);
7691  void RecordTouchAllocation(uint32_t frameIndex,
7692  VmaAllocation allocation);
7693  void RecordGetAllocationInfo(uint32_t frameIndex,
7694  VmaAllocation allocation);
7695  void RecordMakePoolAllocationsLost(uint32_t frameIndex,
7696  VmaPool pool);
7697  void RecordDefragmentationBegin(uint32_t frameIndex,
7698  const VmaDefragmentationInfo2& info,
7699  VmaDefragmentationContext ctx);
7700  void RecordDefragmentationEnd(uint32_t frameIndex,
7701  VmaDefragmentationContext ctx);
7702  void RecordSetPoolName(uint32_t frameIndex,
7703  VmaPool pool,
7704  const char* name);
7705 
7706 private:
7707  struct CallParams
7708  {
7709  uint32_t threadId;
7710  double time;
7711  };
7712 
7713  class UserDataString
7714  {
7715  public:
7716  UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
7717  const char* GetString() const { return m_Str; }
7718 
7719  private:
7720  char m_PtrStr[17];
7721  const char* m_Str;
7722  };
7723 
7724  bool m_UseMutex;
7725  VmaRecordFlags m_Flags;
7726  FILE* m_File;
7727  VMA_MUTEX m_FileMutex;
7728  std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
7729 
7730  void GetBasicParams(CallParams& outParams);
7731 
7732  // T must be a pointer type, e.g. VmaAllocation, VmaPool.
7733  template<typename T>
7734  void PrintPointerList(uint64_t count, const T* pItems)
7735  {
7736  if(count)
7737  {
7738  fprintf(m_File, "%p", pItems[0]);
7739  for(uint64_t i = 1; i < count; ++i)
7740  {
7741  fprintf(m_File, " %p", pItems[i]);
7742  }
7743  }
7744  }
7745 
7746  void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
7747  void Flush();
7748 };
7749 
7750 #endif // #if VMA_RECORDING_ENABLED
7751 
7752 /*
7753 Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
7754 */
7755 class VmaAllocationObjectAllocator
7756 {
7757  VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
7758 public:
7759  VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
7760 
7761  template<typename... Types> VmaAllocation Allocate(Types... args);
7762  void Free(VmaAllocation hAlloc);
7763 
7764 private:
7765  VMA_MUTEX m_Mutex;
7766  VmaPoolAllocator<VmaAllocation_T> m_Allocator;
7767 };
7768 
7769 struct VmaCurrentBudgetData
7770 {
7771  VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
7772  VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
7773 
7774 #if VMA_MEMORY_BUDGET
7775  VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
7776  VMA_RW_MUTEX m_BudgetMutex;
7777  uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
7778  uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
7779  uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
7780 #endif // #if VMA_MEMORY_BUDGET
7781 
7782  VmaCurrentBudgetData()
7783  {
7784  for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
7785  {
7786  m_BlockBytes[heapIndex] = 0;
7787  m_AllocationBytes[heapIndex] = 0;
7788 #if VMA_MEMORY_BUDGET
7789  m_VulkanUsage[heapIndex] = 0;
7790  m_VulkanBudget[heapIndex] = 0;
7791  m_BlockBytesAtBudgetFetch[heapIndex] = 0;
7792 #endif
7793  }
7794 
7795 #if VMA_MEMORY_BUDGET
7796  m_OperationsSinceBudgetFetch = 0;
7797 #endif
7798  }
7799 
7800  void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7801  {
7802  m_AllocationBytes[heapIndex] += allocationSize;
7803 #if VMA_MEMORY_BUDGET
7804  ++m_OperationsSinceBudgetFetch;
7805 #endif
7806  }
7807 
7808  void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
7809  {
7810  VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
7811  m_AllocationBytes[heapIndex] -= allocationSize;
7812 #if VMA_MEMORY_BUDGET
7813  ++m_OperationsSinceBudgetFetch;
7814 #endif
7815  }
7816 };
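/*
A hypothetical update path, assuming a VmaAllocator 'hAllocator' and a memory
type index 'memTypeIndex' (MemoryTypeIndexToHeapIndex is declared below, in
VmaAllocator_T):

    const uint32_t heapIndex = hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
    hAllocator->m_Budget.AddAllocation(heapIndex, allocationSize);
    // ... later, when the same allocation is freed:
    hAllocator->m_Budget.RemoveAllocation(heapIndex, allocationSize);
*/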
7817 
7818 // Main allocator object.
7819 struct VmaAllocator_T
7820 {
7821  VMA_CLASS_NO_COPY(VmaAllocator_T)
7822 public:
7823  bool m_UseMutex;
7824  uint32_t m_VulkanApiVersion;
7825  bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7826  bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
7827  bool m_UseExtMemoryBudget;
7828  bool m_UseAmdDeviceCoherentMemory;
7829  bool m_UseKhrBufferDeviceAddress;
7830  VkDevice m_hDevice;
7831  VkInstance m_hInstance;
7832  bool m_AllocationCallbacksSpecified;
7833  VkAllocationCallbacks m_AllocationCallbacks;
7834  VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
7835  VmaAllocationObjectAllocator m_AllocationObjectAllocator;
7836 
7837  // Bit (1 << i) is set if HeapSizeLimit is enabled for heap i, so no more than the heap size can be allocated from it.
7838  uint32_t m_HeapSizeLimitMask;
7839 
7840  VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
7841  VkPhysicalDeviceMemoryProperties m_MemProps;
7842 
7843  // Default pools.
7844  VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
7845 
7846  // Each vector is sorted by memory (handle value).
7847  typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
7848  AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
7849  VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
7850 
7851  VmaCurrentBudgetData m_Budget;
7852 
7853  VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
7854  VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
7855  ~VmaAllocator_T();
7856 
7857  const VkAllocationCallbacks* GetAllocationCallbacks() const
7858  {
7859  return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
7860  }
7861  const VmaVulkanFunctions& GetVulkanFunctions() const
7862  {
7863  return m_VulkanFunctions;
7864  }
7865 
7866  VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
7867 
7868  VkDeviceSize GetBufferImageGranularity() const
7869  {
7870  return VMA_MAX(
7871  static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
7872  m_PhysicalDeviceProperties.limits.bufferImageGranularity);
7873  }
7874 
7875  uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
7876  uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
7877 
7878  uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
7879  {
7880  VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
7881  return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
7882  }
7883  // True when the specific memory type is HOST_VISIBLE but not HOST_COHERENT.
7884  bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
7885  {
7886  return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
7887  VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
7888  }
7889  // Minimum alignment for all allocations in the specific memory type.
7890  VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
7891  {
7892  return IsMemoryTypeNonCoherent(memTypeIndex) ?
7893  VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
7894  (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
7895  }
7896 
7897  bool IsIntegratedGpu() const
7898  {
7899  return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
7900  }
7901 
7902  uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
7903 
7904 #if VMA_RECORDING_ENABLED
7905  VmaRecorder* GetRecorder() const { return m_pRecorder; }
7906 #endif
7907 
7908  void GetBufferMemoryRequirements(
7909  VkBuffer hBuffer,
7910  VkMemoryRequirements& memReq,
7911  bool& requiresDedicatedAllocation,
7912  bool& prefersDedicatedAllocation) const;
7913  void GetImageMemoryRequirements(
7914  VkImage hImage,
7915  VkMemoryRequirements& memReq,
7916  bool& requiresDedicatedAllocation,
7917  bool& prefersDedicatedAllocation) const;
7918 
7919  // Main allocation function.
7920  VkResult AllocateMemory(
7921  const VkMemoryRequirements& vkMemReq,
7922  bool requiresDedicatedAllocation,
7923  bool prefersDedicatedAllocation,
7924  VkBuffer dedicatedBuffer,
7925  VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
7926  VkImage dedicatedImage,
7927  const VmaAllocationCreateInfo& createInfo,
7928  VmaSuballocationType suballocType,
7929  size_t allocationCount,
7930  VmaAllocation* pAllocations);
7931 
7932  // Main deallocation function.
7933  void FreeMemory(
7934  size_t allocationCount,
7935  const VmaAllocation* pAllocations);
7936 
7937  VkResult ResizeAllocation(
7938  const VmaAllocation alloc,
7939  VkDeviceSize newSize);
7940 
7941  void CalculateStats(VmaStats* pStats);
7942 
7943  void GetBudget(
7944  VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount);
7945 
7946 #if VMA_STATS_STRING_ENABLED
7947  void PrintDetailedMap(class VmaJsonWriter& json);
7948 #endif
7949 
7950  VkResult DefragmentationBegin(
7951  const VmaDefragmentationInfo2& info,
7952  VmaDefragmentationStats* pStats,
7953  VmaDefragmentationContext* pContext);
7954  VkResult DefragmentationEnd(
7955  VmaDefragmentationContext context);
7956 
7957  VkResult DefragmentationPassBegin(
7958  VmaDefragmentationPassInfo* pInfo,
7959  VmaDefragmentationContext context);
7960  VkResult DefragmentationPassEnd(
7961  VmaDefragmentationContext context);
7962 
7963  void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
7964  bool TouchAllocation(VmaAllocation hAllocation);
7965 
7966  VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
7967  void DestroyPool(VmaPool pool);
7968  void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats);
7969 
7970  void SetCurrentFrameIndex(uint32_t frameIndex);
7971  uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
7972 
7973  void MakePoolAllocationsLost(
7974  VmaPool hPool,
7975  size_t* pLostAllocationCount);
7976  VkResult CheckPoolCorruption(VmaPool hPool);
7977  VkResult CheckCorruption(uint32_t memoryTypeBits);
7978 
7979  void CreateLostAllocation(VmaAllocation* pAllocation);
7980 
7981  // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
7982  VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
7983  // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
7984  void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
7985  // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
7986  VkResult BindVulkanBuffer(
7987  VkDeviceMemory memory,
7988  VkDeviceSize memoryOffset,
7989  VkBuffer buffer,
7990  const void* pNext);
7991  // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
7992  VkResult BindVulkanImage(
7993  VkDeviceMemory memory,
7994  VkDeviceSize memoryOffset,
7995  VkImage image,
7996  const void* pNext);
7997 
7998  VkResult Map(VmaAllocation hAllocation, void** ppData);
7999  void Unmap(VmaAllocation hAllocation);
8000 
8001  VkResult BindBufferMemory(
8002  VmaAllocation hAllocation,
8003  VkDeviceSize allocationLocalOffset,
8004  VkBuffer hBuffer,
8005  const void* pNext);
8006  VkResult BindImageMemory(
8007  VmaAllocation hAllocation,
8008  VkDeviceSize allocationLocalOffset,
8009  VkImage hImage,
8010  const void* pNext);
8011 
8012  VkResult FlushOrInvalidateAllocation(
8013  VmaAllocation hAllocation,
8014  VkDeviceSize offset, VkDeviceSize size,
8015  VMA_CACHE_OPERATION op);
8016  VkResult FlushOrInvalidateAllocations(
8017  uint32_t allocationCount,
8018  const VmaAllocation* allocations,
8019  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
8020  VMA_CACHE_OPERATION op);
8021 
8022  void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
8023 
8024  /*
8025  Returns a bit mask of memory types that can support defragmentation on the GPU,
8026  as they support creation of the buffer required for copy operations.
8027  */
8028  uint32_t GetGpuDefragmentationMemoryTypeBits();
8029 
8030 private:
8031  VkDeviceSize m_PreferredLargeHeapBlockSize;
8032 
8033  VkPhysicalDevice m_PhysicalDevice;
8034  VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
8035  VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
8036 
8037  VMA_RW_MUTEX m_PoolsMutex;
8038  // Protected by m_PoolsMutex. Sorted by pointer value.
8039  VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
8040  uint32_t m_NextPoolId;
8041 
8042  VmaVulkanFunctions m_VulkanFunctions;
8043 
8044  // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
8045  uint32_t m_GlobalMemoryTypeBits;
8046 
8047 #if VMA_RECORDING_ENABLED
8048  VmaRecorder* m_pRecorder;
8049 #endif
8050 
8051  void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
8052 
8053 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
8054  void ImportVulkanFunctions_Static();
8055 #endif
8056 
8057  void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
8058 
8059 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
8060  void ImportVulkanFunctions_Dynamic();
8061 #endif
8062 
8063  void ValidateVulkanFunctions();
8064 
8065  VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
8066 
8067  VkResult AllocateMemoryOfType(
8068  VkDeviceSize size,
8069  VkDeviceSize alignment,
8070  bool dedicatedAllocation,
8071  VkBuffer dedicatedBuffer,
8072  VkBufferUsageFlags dedicatedBufferUsage,
8073  VkImage dedicatedImage,
8074  const VmaAllocationCreateInfo& createInfo,
8075  uint32_t memTypeIndex,
8076  VmaSuballocationType suballocType,
8077  size_t allocationCount,
8078  VmaAllocation* pAllocations);
8079 
8080  // Helper function only to be used inside AllocateDedicatedMemory.
8081  VkResult AllocateDedicatedMemoryPage(
8082  VkDeviceSize size,
8083  VmaSuballocationType suballocType,
8084  uint32_t memTypeIndex,
8085  const VkMemoryAllocateInfo& allocInfo,
8086  bool map,
8087  bool isUserDataString,
8088  void* pUserData,
8089  VmaAllocation* pAllocation);
8090 
8091  // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
8092  VkResult AllocateDedicatedMemory(
8093  VkDeviceSize size,
8094  VmaSuballocationType suballocType,
8095  uint32_t memTypeIndex,
8096  bool withinBudget,
8097  bool map,
8098  bool isUserDataString,
8099  void* pUserData,
8100  VkBuffer dedicatedBuffer,
8101  VkBufferUsageFlags dedicatedBufferUsage,
8102  VkImage dedicatedImage,
8103  size_t allocationCount,
8104  VmaAllocation* pAllocations);
8105 
8106  void FreeDedicatedMemory(const VmaAllocation allocation);
8107 
8108  /*
8109  Calculates and returns a bit mask of memory types that can support defragmentation
8110  on the GPU, as they support creation of the buffer required for copy operations.
8111  */
8112  uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
8113 
8114  uint32_t CalculateGlobalMemoryTypeBits() const;
8115 
8116  bool GetFlushOrInvalidateRange(
8117  VmaAllocation allocation,
8118  VkDeviceSize offset, VkDeviceSize size,
8119  VkMappedMemoryRange& outRange) const;
8120 
8121 #if VMA_MEMORY_BUDGET
8122  void UpdateVulkanBudget();
8123 #endif // #if VMA_MEMORY_BUDGET
8124 };
8125 
8126 ////////////////////////////////////////////////////////////////////////////////
8127 // Memory allocation #2 after VmaAllocator_T definition
8128 
8129 static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
8130 {
8131  return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
8132 }
8133 
8134 static void VmaFree(VmaAllocator hAllocator, void* ptr)
8135 {
8136  VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
8137 }
8138 
8139 template<typename T>
8140 static T* VmaAllocate(VmaAllocator hAllocator)
8141 {
8142  return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
8143 }
8144 
8145 template<typename T>
8146 static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
8147 {
8148  return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
8149 }
8150 
8151 template<typename T>
8152 static void vma_delete(VmaAllocator hAllocator, T* ptr)
8153 {
8154  if(ptr != VMA_NULL)
8155  {
8156  ptr->~T();
8157  VmaFree(hAllocator, ptr);
8158  }
8159 }
8160 
8161 template<typename T>
8162 static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
8163 {
8164  if(ptr != VMA_NULL)
8165  {
8166  for(size_t i = count; i--; )
8167  ptr[i].~T();
8168  VmaFree(hAllocator, ptr);
8169  }
8170 }
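/*
These helpers pair raw allocation with explicit destruction. A hypothetical use
with a trivially-constructible element type (note that VmaAllocateArray runs no
constructors, while vma_delete_array runs destructors before freeing):

    uint32_t* pIndices = VmaAllocateArray<uint32_t>(hAllocator, 16);
    // ... fill pIndices[0..15] ...
    vma_delete_array(hAllocator, pIndices, 16);
*/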
8171 
8172 ////////////////////////////////////////////////////////////////////////////////
8173 // VmaStringBuilder
8174 
8175 #if VMA_STATS_STRING_ENABLED
8176 
8177 class VmaStringBuilder
8178 {
8179 public:
8180  VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
8181  size_t GetLength() const { return m_Data.size(); }
8182  const char* GetData() const { return m_Data.data(); }
8183 
8184  void Add(char ch) { m_Data.push_back(ch); }
8185  void Add(const char* pStr);
8186  void AddNewLine() { Add('\n'); }
8187  void AddNumber(uint32_t num);
8188  void AddNumber(uint64_t num);
8189  void AddPointer(const void* ptr);
8190 
8191 private:
8192  VmaVector< char, VmaStlAllocator<char> > m_Data;
8193 };
8194 
8195 void VmaStringBuilder::Add(const char* pStr)
8196 {
8197  const size_t strLen = strlen(pStr);
8198  if(strLen > 0)
8199  {
8200  const size_t oldCount = m_Data.size();
8201  m_Data.resize(oldCount + strLen);
8202  memcpy(m_Data.data() + oldCount, pStr, strLen);
8203  }
8204 }
8205 
8206 void VmaStringBuilder::AddNumber(uint32_t num)
8207 {
8208  char buf[11];
8209  buf[10] = '\0';
8210  char *p = &buf[10];
8211  do
8212  {
8213  *--p = '0' + (num % 10);
8214  num /= 10;
8215  }
8216  while(num);
8217  Add(p);
8218 }
8219 
8220 void VmaStringBuilder::AddNumber(uint64_t num)
8221 {
8222  char buf[21];
8223  buf[20] = '\0';
8224  char *p = &buf[20];
8225  do
8226  {
8227  *--p = '0' + (num % 10);
8228  num /= 10;
8229  }
8230  while(num);
8231  Add(p);
8232 }
8233 
8234 void VmaStringBuilder::AddPointer(const void* ptr)
8235 {
8236  char buf[21];
8237  VmaPtrToStr(buf, sizeof(buf), ptr);
8238  Add(buf);
8239 }
8240 
8241 #endif // #if VMA_STATS_STRING_ENABLED
8242 
8243 ////////////////////////////////////////////////////////////////////////////////
8244 // VmaJsonWriter
8245 
8246 #if VMA_STATS_STRING_ENABLED
8247 
8248 class VmaJsonWriter
8249 {
8250  VMA_CLASS_NO_COPY(VmaJsonWriter)
8251 public:
8252  VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
8253  ~VmaJsonWriter();
8254 
8255  void BeginObject(bool singleLine = false);
8256  void EndObject();
8257 
8258  void BeginArray(bool singleLine = false);
8259  void EndArray();
8260 
8261  void WriteString(const char* pStr);
8262  void BeginString(const char* pStr = VMA_NULL);
8263  void ContinueString(const char* pStr);
8264  void ContinueString(uint32_t n);
8265  void ContinueString(uint64_t n);
8266  void ContinueString_Pointer(const void* ptr);
8267  void EndString(const char* pStr = VMA_NULL);
8268 
8269  void WriteNumber(uint32_t n);
8270  void WriteNumber(uint64_t n);
8271  void WriteBool(bool b);
8272  void WriteNull();
8273 
8274 private:
8275  static const char* const INDENT;
8276 
8277  enum COLLECTION_TYPE
8278  {
8279  COLLECTION_TYPE_OBJECT,
8280  COLLECTION_TYPE_ARRAY,
8281  };
8282  struct StackItem
8283  {
8284  COLLECTION_TYPE type;
8285  uint32_t valueCount;
8286  bool singleLineMode;
8287  };
8288 
8289  VmaStringBuilder& m_SB;
8290  VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
8291  bool m_InsideString;
8292 
8293  void BeginValue(bool isString);
8294  void WriteIndent(bool oneLess = false);
8295 };
8296 
8297 const char* const VmaJsonWriter::INDENT = " ";
8298 
8299 VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
8300  m_SB(sb),
8301  m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
8302  m_InsideString(false)
8303 {
8304 }
8305 
8306 VmaJsonWriter::~VmaJsonWriter()
8307 {
8308  VMA_ASSERT(!m_InsideString);
8309  VMA_ASSERT(m_Stack.empty());
8310 }
8311 
8312 void VmaJsonWriter::BeginObject(bool singleLine)
8313 {
8314  VMA_ASSERT(!m_InsideString);
8315 
8316  BeginValue(false);
8317  m_SB.Add('{');
8318 
8319  StackItem item;
8320  item.type = COLLECTION_TYPE_OBJECT;
8321  item.valueCount = 0;
8322  item.singleLineMode = singleLine;
8323  m_Stack.push_back(item);
8324 }
8325 
8326 void VmaJsonWriter::EndObject()
8327 {
8328  VMA_ASSERT(!m_InsideString);
8329 
8330  WriteIndent(true);
8331  m_SB.Add('}');
8332 
8333  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
8334  m_Stack.pop_back();
8335 }
8336 
8337 void VmaJsonWriter::BeginArray(bool singleLine)
8338 {
8339  VMA_ASSERT(!m_InsideString);
8340 
8341  BeginValue(false);
8342  m_SB.Add('[');
8343 
8344  StackItem item;
8345  item.type = COLLECTION_TYPE_ARRAY;
8346  item.valueCount = 0;
8347  item.singleLineMode = singleLine;
8348  m_Stack.push_back(item);
8349 }
8350 
8351 void VmaJsonWriter::EndArray()
8352 {
8353  VMA_ASSERT(!m_InsideString);
8354 
8355  WriteIndent(true);
8356  m_SB.Add(']');
8357 
8358  VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
8359  m_Stack.pop_back();
8360 }
8361 
8362 void VmaJsonWriter::WriteString(const char* pStr)
8363 {
8364  BeginString(pStr);
8365  EndString();
8366 }
8367 
8368 void VmaJsonWriter::BeginString(const char* pStr)
8369 {
8370  VMA_ASSERT(!m_InsideString);
8371 
8372  BeginValue(true);
8373  m_SB.Add('"');
8374  m_InsideString = true;
8375  if(pStr != VMA_NULL && pStr[0] != '\0')
8376  {
8377  ContinueString(pStr);
8378  }
8379 }
8380 
8381 void VmaJsonWriter::ContinueString(const char* pStr)
8382 {
8383  VMA_ASSERT(m_InsideString);
8384 
8385  const size_t strLen = strlen(pStr);
8386  for(size_t i = 0; i < strLen; ++i)
8387  {
8388  char ch = pStr[i];
8389  if(ch == '\\')
8390  {
8391  m_SB.Add("\\\\");
8392  }
8393  else if(ch == '"')
8394  {
8395  m_SB.Add("\\\"");
8396  }
8397  else if(ch >= 32)
8398  {
8399  m_SB.Add(ch);
8400  }
8401  else switch(ch)
8402  {
8403  case '\b':
8404  m_SB.Add("\\b");
8405  break;
8406  case '\f':
8407  m_SB.Add("\\f");
8408  break;
8409  case '\n':
8410  m_SB.Add("\\n");
8411  break;
8412  case '\r':
8413  m_SB.Add("\\r");
8414  break;
8415  case '\t':
8416  m_SB.Add("\\t");
8417  break;
8418  default:
8419  VMA_ASSERT(0 && "Character not currently supported.");
8420  break;
8421  }
8422  }
8423 }
8424 
8425 void VmaJsonWriter::ContinueString(uint32_t n)
8426 {
8427  VMA_ASSERT(m_InsideString);
8428  m_SB.AddNumber(n);
8429 }
8430 
8431 void VmaJsonWriter::ContinueString(uint64_t n)
8432 {
8433  VMA_ASSERT(m_InsideString);
8434  m_SB.AddNumber(n);
8435 }
8436 
8437 void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
8438 {
8439  VMA_ASSERT(m_InsideString);
8440  m_SB.AddPointer(ptr);
8441 }
8442 
8443 void VmaJsonWriter::EndString(const char* pStr)
8444 {
8445  VMA_ASSERT(m_InsideString);
8446  if(pStr != VMA_NULL && pStr[0] != '\0')
8447  {
8448  ContinueString(pStr);
8449  }
8450  m_SB.Add('"');
8451  m_InsideString = false;
8452 }
8453 
8454 void VmaJsonWriter::WriteNumber(uint32_t n)
8455 {
8456  VMA_ASSERT(!m_InsideString);
8457  BeginValue(false);
8458  m_SB.AddNumber(n);
8459 }
8460 
8461 void VmaJsonWriter::WriteNumber(uint64_t n)
8462 {
8463  VMA_ASSERT(!m_InsideString);
8464  BeginValue(false);
8465  m_SB.AddNumber(n);
8466 }
8467 
8468 void VmaJsonWriter::WriteBool(bool b)
8469 {
8470  VMA_ASSERT(!m_InsideString);
8471  BeginValue(false);
8472  m_SB.Add(b ? "true" : "false");
8473 }
8474 
8475 void VmaJsonWriter::WriteNull()
8476 {
8477  VMA_ASSERT(!m_InsideString);
8478  BeginValue(false);
8479  m_SB.Add("null");
8480 }
8481 
8482 void VmaJsonWriter::BeginValue(bool isString)
8483 {
8484  if(!m_Stack.empty())
8485  {
8486  StackItem& currItem = m_Stack.back();
8487  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8488  currItem.valueCount % 2 == 0)
8489  {
8490  VMA_ASSERT(isString);
8491  }
8492 
8493  if(currItem.type == COLLECTION_TYPE_OBJECT &&
8494  currItem.valueCount % 2 != 0)
8495  {
8496  m_SB.Add(": ");
8497  }
8498  else if(currItem.valueCount > 0)
8499  {
8500  m_SB.Add(", ");
8501  WriteIndent();
8502  }
8503  else
8504  {
8505  WriteIndent();
8506  }
8507  ++currItem.valueCount;
8508  }
8509 }
8510 
8511 void VmaJsonWriter::WriteIndent(bool oneLess)
8512 {
8513  if(!m_Stack.empty() && !m_Stack.back().singleLineMode)
8514  {
8515  m_SB.AddNewLine();
8516 
8517  size_t count = m_Stack.size();
8518  if(count > 0 && oneLess)
8519  {
8520  --count;
8521  }
8522  for(size_t i = 0; i < count; ++i)
8523  {
8524  m_SB.Add(INDENT);
8525  }
8526  }
8527 }
8528 
8529 #endif // #if VMA_STATS_STRING_ENABLED
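/*
A minimal, hypothetical round-trip showing the writer's pairing rules (inside an
object, names and values must alternate, which is what BeginValue() asserts):

    VmaStringBuilder sb(hAllocator);
    {
        VmaJsonWriter json(hAllocator->GetAllocationCallbacks(), sb);
        json.BeginObject();
        json.WriteString("Size"); // Name...
        json.WriteNumber(256u);   // ...then value.
        json.EndObject();
    }
    // sb.GetData() now holds something like: {\n "Size": 256\n}
*/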
8530 
8531 ////////////////////////////////////////////////////////////////////////////////
8532 
8533 void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData)
8534 {
8535  if(IsUserDataString())
8536  {
8537  VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData);
8538 
8539  FreeUserDataString(hAllocator);
8540 
8541  if(pUserData != VMA_NULL)
8542  {
8543  m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData);
8544  }
8545  }
8546  else
8547  {
8548  m_pUserData = pUserData;
8549  }
8550 }
8551 
8552 void VmaAllocation_T::ChangeBlockAllocation(
8553  VmaAllocator hAllocator,
8554  VmaDeviceMemoryBlock* block,
8555  VkDeviceSize offset)
8556 {
8557  VMA_ASSERT(block != VMA_NULL);
8558  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8559 
8560  // Move mapping reference counter from old block to new block.
8561  if(block != m_BlockAllocation.m_Block)
8562  {
8563  uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP;
8564  if(IsPersistentMap())
8565  ++mapRefCount;
8566  m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount);
8567  block->Map(hAllocator, mapRefCount, VMA_NULL);
8568  }
8569 
8570  m_BlockAllocation.m_Block = block;
8571  m_BlockAllocation.m_Offset = offset;
8572 }
8573 
8574 void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset)
8575 {
8576  VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
8577  m_BlockAllocation.m_Offset = newOffset;
8578 }
8579 
8580 VkDeviceSize VmaAllocation_T::GetOffset() const
8581 {
8582  switch(m_Type)
8583  {
8584  case ALLOCATION_TYPE_BLOCK:
8585  return m_BlockAllocation.m_Offset;
8586  case ALLOCATION_TYPE_DEDICATED:
8587  return 0;
8588  default:
8589  VMA_ASSERT(0);
8590  return 0;
8591  }
8592 }
8593 
8594 VkDeviceMemory VmaAllocation_T::GetMemory() const
8595 {
8596  switch(m_Type)
8597  {
8598  case ALLOCATION_TYPE_BLOCK:
8599  return m_BlockAllocation.m_Block->GetDeviceMemory();
8600  case ALLOCATION_TYPE_DEDICATED:
8601  return m_DedicatedAllocation.m_hMemory;
8602  default:
8603  VMA_ASSERT(0);
8604  return VK_NULL_HANDLE;
8605  }
8606 }
8607 
8608 void* VmaAllocation_T::GetMappedData() const
8609 {
8610  switch(m_Type)
8611  {
8612  case ALLOCATION_TYPE_BLOCK:
8613  if(m_MapCount != 0)
8614  {
8615  void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
8616  VMA_ASSERT(pBlockData != VMA_NULL);
8617  return (char*)pBlockData + m_BlockAllocation.m_Offset;
8618  }
8619  else
8620  {
8621  return VMA_NULL;
8622  }
8623  break;
8624  case ALLOCATION_TYPE_DEDICATED:
8625  VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0));
8626  return m_DedicatedAllocation.m_pMappedData;
8627  default:
8628  VMA_ASSERT(0);
8629  return VMA_NULL;
8630  }
8631 }
8632 
8633 bool VmaAllocation_T::CanBecomeLost() const
8634 {
8635  switch(m_Type)
8636  {
8637  case ALLOCATION_TYPE_BLOCK:
8638  return m_BlockAllocation.m_CanBecomeLost;
8639  case ALLOCATION_TYPE_DEDICATED:
8640  return false;
8641  default:
8642  VMA_ASSERT(0);
8643  return false;
8644  }
8645 }
8646 
8647 bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
8648 {
8649  VMA_ASSERT(CanBecomeLost());
8650 
8651  /*
8652  Warning: This is a carefully designed algorithm.
8653  Do not modify unless you really know what you're doing :)
8654  */
8655  uint32_t localLastUseFrameIndex = GetLastUseFrameIndex();
8656  for(;;)
8657  {
8658  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
8659  {
8660  VMA_ASSERT(0);
8661  return false;
8662  }
8663  else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex)
8664  {
8665  return false;
8666  }
8667  else // Last use time earlier than current time.
8668  {
8669  if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST))
8670  {
8671  // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST.
8672  // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock.
8673  return true;
8674  }
8675  }
8676  }
8677 }
8678 
8679 #if VMA_STATS_STRING_ENABLED
8680 
8681 // Correspond to values of enum VmaSuballocationType.
8682 static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = {
8683  "FREE",
8684  "UNKNOWN",
8685  "BUFFER",
8686  "IMAGE_UNKNOWN",
8687  "IMAGE_LINEAR",
8688  "IMAGE_OPTIMAL",
8689 };
8690 
8691 void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
8692 {
8693  json.WriteString("Type");
8694  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
8695 
8696  json.WriteString("Size");
8697  json.WriteNumber(m_Size);
8698 
8699  if(m_pUserData != VMA_NULL)
8700  {
8701  json.WriteString("UserData");
8702  if(IsUserDataString())
8703  {
8704  json.WriteString((const char*)m_pUserData);
8705  }
8706  else
8707  {
8708  json.BeginString();
8709  json.ContinueString_Pointer(m_pUserData);
8710  json.EndString();
8711  }
8712  }
8713 
8714  json.WriteString("CreationFrameIndex");
8715  json.WriteNumber(m_CreationFrameIndex);
8716 
8717  json.WriteString("LastUseFrameIndex");
8718  json.WriteNumber(GetLastUseFrameIndex());
8719 
8720  if(m_BufferImageUsage != 0)
8721  {
8722  json.WriteString("Usage");
8723  json.WriteNumber(m_BufferImageUsage);
8724  }
8725 }
8726 
8727 #endif
8728 
8729 void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator)
8730 {
8731  VMA_ASSERT(IsUserDataString());
8732  VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData);
8733  m_pUserData = VMA_NULL;
8734 }
8735 
8736 void VmaAllocation_T::BlockAllocMap()
8737 {
8738  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8739 
8740  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8741  {
8742  ++m_MapCount;
8743  }
8744  else
8745  {
8746  VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
8747  }
8748 }
8749 
8750 void VmaAllocation_T::BlockAllocUnmap()
8751 {
8752  VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
8753 
8754  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8755  {
8756  --m_MapCount;
8757  }
8758  else
8759  {
8760  VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
8761  }
8762 }
8763 
8764 VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
8765 {
8766  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8767 
8768  if(m_MapCount != 0)
8769  {
8770  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F)
8771  {
8772  VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
8773  *ppData = m_DedicatedAllocation.m_pMappedData;
8774  ++m_MapCount;
8775  return VK_SUCCESS;
8776  }
8777  else
8778  {
8779  VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
8780  return VK_ERROR_MEMORY_MAP_FAILED;
8781  }
8782  }
8783  else
8784  {
8785  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
8786  hAllocator->m_hDevice,
8787  m_DedicatedAllocation.m_hMemory,
8788  0, // offset
8789  VK_WHOLE_SIZE,
8790  0, // flags
8791  ppData);
8792  if(result == VK_SUCCESS)
8793  {
8794  m_DedicatedAllocation.m_pMappedData = *ppData;
8795  m_MapCount = 1;
8796  }
8797  return result;
8798  }
8799 }
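/*
The map counter above packs two things into one field: the top bit
(MAP_COUNT_FLAG_PERSISTENT_MAP) marks allocations created persistently mapped,
and the low bits count nested map calls, which is why the limit is 0x7F.
A condensed view of the scheme (illustrative only):

    uint32_t refs = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP; // 0..0x7F
    // Map:   refs < 0x7F  -> ++m_MapCount; reuse m_pMappedData if refs > 0,
    //        otherwise call vkMapMemory once for the whole VkDeviceMemory.
    // Unmap: refs > 0     -> --m_MapCount; call vkUnmapMemory only when the
    //        whole counter (including the persistent bit) reaches 0.
*/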
8800 
8801 void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
8802 {
8803  VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
8804 
8805  if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0)
8806  {
8807  --m_MapCount;
8808  if(m_MapCount == 0)
8809  {
8810  m_DedicatedAllocation.m_pMappedData = VMA_NULL;
8811  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
8812  hAllocator->m_hDevice,
8813  m_DedicatedAllocation.m_hMemory);
8814  }
8815  }
8816  else
8817  {
8818  VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
8819  }
8820 }
8821 
8822 #if VMA_STATS_STRING_ENABLED
8823 
8824 static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat)
8825 {
8826  json.BeginObject();
8827 
8828  json.WriteString("Blocks");
8829  json.WriteNumber(stat.blockCount);
8830 
8831  json.WriteString("Allocations");
8832  json.WriteNumber(stat.allocationCount);
8833 
8834  json.WriteString("UnusedRanges");
8835  json.WriteNumber(stat.unusedRangeCount);
8836 
8837  json.WriteString("UsedBytes");
8838  json.WriteNumber(stat.usedBytes);
8839 
8840  json.WriteString("UnusedBytes");
8841  json.WriteNumber(stat.unusedBytes);
8842 
8843  if(stat.allocationCount > 1)
8844  {
8845  json.WriteString("AllocationSize");
8846  json.BeginObject(true);
8847  json.WriteString("Min");
8848  json.WriteNumber(stat.allocationSizeMin);
8849  json.WriteString("Avg");
8850  json.WriteNumber(stat.allocationSizeAvg);
8851  json.WriteString("Max");
8852  json.WriteNumber(stat.allocationSizeMax);
8853  json.EndObject();
8854  }
8855 
8856  if(stat.unusedRangeCount > 1)
8857  {
8858  json.WriteString("UnusedRangeSize");
8859  json.BeginObject(true);
8860  json.WriteString("Min");
8861  json.WriteNumber(stat.unusedRangeSizeMin);
8862  json.WriteString("Avg");
8863  json.WriteNumber(stat.unusedRangeSizeAvg);
8864  json.WriteString("Max");
8865  json.WriteNumber(stat.unusedRangeSizeMax);
8866  json.EndObject();
8867  }
8868 
8869  json.EndObject();
8870 }
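/*
Shape of the object VmaPrintStatInfo emits, with invented numbers for
illustration:

    {
      "Blocks": 1, "Allocations": 3, "UnusedRanges": 2,
      "UsedBytes": 786432, "UnusedBytes": 262144,
      "AllocationSize": { "Min": 65536, "Avg": 262144, "Max": 524288 }
    }

"AllocationSize" and "UnusedRangeSize" are emitted only when the respective
count exceeds 1, and as single-line objects (BeginObject(true)).
*/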
8871 
8872 #endif // #if VMA_STATS_STRING_ENABLED
8873 
8874 struct VmaSuballocationItemSizeLess
8875 {
8876  bool operator()(
8877  const VmaSuballocationList::iterator lhs,
8878  const VmaSuballocationList::iterator rhs) const
8879  {
8880  return lhs->size < rhs->size;
8881  }
8882  bool operator()(
8883  const VmaSuballocationList::iterator lhs,
8884  VkDeviceSize rhsSize) const
8885  {
8886  return lhs->size < rhsSize;
8887  }
8888 };
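/*
The second overload lets the comparator order an iterator directly against a
raw VkDeviceSize key, so the binary search over m_FreeSuballocationsBySize
needs no dummy list node. An equivalent standalone sketch using
std::lower_bound in place of VmaBinaryFindFirstNotLess (simplified types,
hypothetical names):

    #include <algorithm>
    #include <cstdint>
    #include <list>
    #include <vector>

    struct Suballoc { uint64_t size; };
    using SuballocList = std::list<Suballoc>;

    struct SuballocItemSizeLess
    {
        bool operator()(SuballocList::iterator lhs, SuballocList::iterator rhs) const
            { return lhs->size < rhs->size; }
        bool operator()(SuballocList::iterator lhs, uint64_t rhsSize) const
            { return lhs->size < rhsSize; }
    };

    // First registered free range with size >= requestedSize, or end().
    std::vector<SuballocList::iterator>::iterator FindFirstNotLess(
        std::vector<SuballocList::iterator>& bySize, uint64_t requestedSize)
    {
        return std::lower_bound(bySize.begin(), bySize.end(),
                                requestedSize, SuballocItemSizeLess());
    }
*/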
8889 
8890 
8891 ////////////////////////////////////////////////////////////////////////////////
8892 // class VmaBlockMetadata
8893 
8894 VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
8895  m_Size(0),
8896  m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
8897 {
8898 }
8899 
8900 #if VMA_STATS_STRING_ENABLED
8901 
8902 void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
8903  VkDeviceSize unusedBytes,
8904  size_t allocationCount,
8905  size_t unusedRangeCount) const
8906 {
8907  json.BeginObject();
8908 
8909  json.WriteString("TotalBytes");
8910  json.WriteNumber(GetSize());
8911 
8912  json.WriteString("UnusedBytes");
8913  json.WriteNumber(unusedBytes);
8914 
8915  json.WriteString("Allocations");
8916  json.WriteNumber((uint64_t)allocationCount);
8917 
8918  json.WriteString("UnusedRanges");
8919  json.WriteNumber((uint64_t)unusedRangeCount);
8920 
8921  json.WriteString("Suballocations");
8922  json.BeginArray();
8923 }
8924 
8925 void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
8926  VkDeviceSize offset,
8927  VmaAllocation hAllocation) const
8928 {
8929  json.BeginObject(true);
8930 
8931  json.WriteString("Offset");
8932  json.WriteNumber(offset);
8933 
8934  hAllocation->PrintParameters(json);
8935 
8936  json.EndObject();
8937 }
8938 
8939 void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
8940  VkDeviceSize offset,
8941  VkDeviceSize size) const
8942 {
8943  json.BeginObject(true);
8944 
8945  json.WriteString("Offset");
8946  json.WriteNumber(offset);
8947 
8948  json.WriteString("Type");
8949  json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
8950 
8951  json.WriteString("Size");
8952  json.WriteNumber(size);
8953 
8954  json.EndObject();
8955 }
8956 
8957 void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
8958 {
8959  json.EndArray();
8960  json.EndObject();
8961 }
8962 
8963 #endif // #if VMA_STATS_STRING_ENABLED
8964 
8965 ////////////////////////////////////////////////////////////////////////////////
8966 // class VmaBlockMetadata_Generic
8967 
8968 VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
8969  VmaBlockMetadata(hAllocator),
8970  m_FreeCount(0),
8971  m_SumFreeSize(0),
8972  m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
8973  m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
8974 {
8975 }
8976 
8977 VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
8978 {
8979 }
8980 
8981 void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
8982 {
8983  VmaBlockMetadata::Init(size);
8984 
8985  m_FreeCount = 1;
8986  m_SumFreeSize = size;
8987 
8988  VmaSuballocation suballoc = {};
8989  suballoc.offset = 0;
8990  suballoc.size = size;
8991  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
8992  suballoc.hAllocation = VK_NULL_HANDLE;
8993 
8994  VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
8995  m_Suballocations.push_back(suballoc);
8996  VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
8997  --suballocItem;
8998  m_FreeSuballocationsBySize.push_back(suballocItem);
8999 }
9000 
9001 bool VmaBlockMetadata_Generic::Validate() const
9002 {
9003  VMA_VALIDATE(!m_Suballocations.empty());
9004 
9005  // Expected offset of new suballocation as calculated from previous ones.
9006  VkDeviceSize calculatedOffset = 0;
9007  // Expected number of free suballocations as calculated from traversing their list.
9008  uint32_t calculatedFreeCount = 0;
9009  // Expected sum size of free suballocations as calculated from traversing their list.
9010  VkDeviceSize calculatedSumFreeSize = 0;
9011  // Expected number of free suballocations that should be registered in
9012  // m_FreeSuballocationsBySize calculated from traversing their list.
9013  size_t freeSuballocationsToRegister = 0;
9014  // True if previous visited suballocation was free.
9015  bool prevFree = false;
9016 
9017  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9018  suballocItem != m_Suballocations.cend();
9019  ++suballocItem)
9020  {
9021  const VmaSuballocation& subAlloc = *suballocItem;
9022 
9023  // Actual offset of this suballocation doesn't match expected one.
9024  VMA_VALIDATE(subAlloc.offset == calculatedOffset);
9025 
9026  const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
9027  // Two adjacent free suballocations are invalid. They should be merged.
9028  VMA_VALIDATE(!prevFree || !currFree);
9029 
9030  VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
9031 
9032  if(currFree)
9033  {
9034  calculatedSumFreeSize += subAlloc.size;
9035  ++calculatedFreeCount;
9036  if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9037  {
9038  ++freeSuballocationsToRegister;
9039  }
9040 
9041  // Margin required between allocations - every free space must be at least that large.
9042  VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
9043  }
9044  else
9045  {
9046  VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
9047  VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
9048 
9049  // Margin required between allocations - previous allocation must be free.
9050  VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
9051  }
9052 
9053  calculatedOffset += subAlloc.size;
9054  prevFree = currFree;
9055  }
9056 
9057  // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
9058  // match expected one.
9059  VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
9060 
9061  VkDeviceSize lastSize = 0;
9062  for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
9063  {
9064  VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
9065 
9066  // Only free suballocations can be registered in m_FreeSuballocationsBySize.
9067  VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9068  // They must be sorted by size ascending.
9069  VMA_VALIDATE(suballocItem->size >= lastSize);
9070 
9071  lastSize = suballocItem->size;
9072  }
9073 
9074  // Check if totals match calculated values.
9075  VMA_VALIDATE(ValidateFreeSuballocationList());
9076  VMA_VALIDATE(calculatedOffset == GetSize());
9077  VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
9078  VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
9079 
9080  return true;
9081 }
9082 
9083 VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const
9084 {
9085  if(!m_FreeSuballocationsBySize.empty())
9086  {
9087  return m_FreeSuballocationsBySize.back()->size;
9088  }
9089  else
9090  {
9091  return 0;
9092  }
9093 }
9094 
9095 bool VmaBlockMetadata_Generic::IsEmpty() const
9096 {
9097  return (m_Suballocations.size() == 1) && (m_FreeCount == 1);
9098 }
9099 
9100 void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
9101 {
9102  outInfo.blockCount = 1;
9103 
9104  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9105  outInfo.allocationCount = rangeCount - m_FreeCount;
9106  outInfo.unusedRangeCount = m_FreeCount;
9107 
9108  outInfo.unusedBytes = m_SumFreeSize;
9109  outInfo.usedBytes = GetSize() - outInfo.unusedBytes;
9110 
9111  outInfo.allocationSizeMin = UINT64_MAX;
9112  outInfo.allocationSizeMax = 0;
9113  outInfo.unusedRangeSizeMin = UINT64_MAX;
9114  outInfo.unusedRangeSizeMax = 0;
9115 
9116  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9117  suballocItem != m_Suballocations.cend();
9118  ++suballocItem)
9119  {
9120  const VmaSuballocation& suballoc = *suballocItem;
9121  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
9122  {
9123  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
9124  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
9125  }
9126  else
9127  {
9128  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size);
9129  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size);
9130  }
9131  }
9132 }
9133 
9134 void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const
9135 {
9136  const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
9137 
9138  inoutStats.size += GetSize();
9139  inoutStats.unusedSize += m_SumFreeSize;
9140  inoutStats.allocationCount += rangeCount - m_FreeCount;
9141  inoutStats.unusedRangeCount += m_FreeCount;
9142  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
9143 }
9144 
9145 #if VMA_STATS_STRING_ENABLED
9146 
9147 void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const
9148 {
9149  PrintDetailedMap_Begin(json,
9150  m_SumFreeSize, // unusedBytes
9151  m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
9152  m_FreeCount); // unusedRangeCount
9153 
9154  size_t i = 0;
9155  for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
9156  suballocItem != m_Suballocations.cend();
9157  ++suballocItem, ++i)
9158  {
9159  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9160  {
9161  PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size);
9162  }
9163  else
9164  {
9165  PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation);
9166  }
9167  }
9168 
9169  PrintDetailedMap_End(json);
9170 }
9171 
9172 #endif // #if VMA_STATS_STRING_ENABLED
9173 
9174 bool VmaBlockMetadata_Generic::CreateAllocationRequest(
9175  uint32_t currentFrameIndex,
9176  uint32_t frameInUseCount,
9177  VkDeviceSize bufferImageGranularity,
9178  VkDeviceSize allocSize,
9179  VkDeviceSize allocAlignment,
9180  bool upperAddress,
9181  VmaSuballocationType allocType,
9182  bool canMakeOtherLost,
9183  uint32_t strategy,
9184  VmaAllocationRequest* pAllocationRequest)
9185 {
9186  VMA_ASSERT(allocSize > 0);
9187  VMA_ASSERT(!upperAddress);
9188  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9189  VMA_ASSERT(pAllocationRequest != VMA_NULL);
9190  VMA_HEAVY_ASSERT(Validate());
9191 
9192  pAllocationRequest->type = VmaAllocationRequestType::Normal;
9193 
9194  // There is not enough total free space in this block to fulfill the request: Early return.
9195  if(canMakeOtherLost == false &&
9196  m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
9197  {
9198  return false;
9199  }
9200 
9201  // New algorithm, efficiently searching freeSuballocationsBySize.
9202  const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
9203  if(freeSuballocCount > 0)
9204  {
9205  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
9206  {
9207  // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
9208  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9209  m_FreeSuballocationsBySize.data(),
9210  m_FreeSuballocationsBySize.data() + freeSuballocCount,
9211  allocSize + 2 * VMA_DEBUG_MARGIN,
9212  VmaSuballocationItemSizeLess());
9213  size_t index = it - m_FreeSuballocationsBySize.data();
9214  for(; index < freeSuballocCount; ++index)
9215  {
9216  if(CheckAllocation(
9217  currentFrameIndex,
9218  frameInUseCount,
9219  bufferImageGranularity,
9220  allocSize,
9221  allocAlignment,
9222  allocType,
9223  m_FreeSuballocationsBySize[index],
9224  false, // canMakeOtherLost
9225  &pAllocationRequest->offset,
9226  &pAllocationRequest->itemsToMakeLostCount,
9227  &pAllocationRequest->sumFreeSize,
9228  &pAllocationRequest->sumItemSize))
9229  {
9230  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9231  return true;
9232  }
9233  }
9234  }
9235  else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
9236  {
9237  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9238  it != m_Suballocations.end();
9239  ++it)
9240  {
9241  if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
9242  currentFrameIndex,
9243  frameInUseCount,
9244  bufferImageGranularity,
9245  allocSize,
9246  allocAlignment,
9247  allocType,
9248  it,
9249  false, // canMakeOtherLost
9250  &pAllocationRequest->offset,
9251  &pAllocationRequest->itemsToMakeLostCount,
9252  &pAllocationRequest->sumFreeSize,
9253  &pAllocationRequest->sumItemSize))
9254  {
9255  pAllocationRequest->item = it;
9256  return true;
9257  }
9258  }
9259  }
9260  else // WORST_FIT, FIRST_FIT
9261  {
9262  // Search starting from the biggest suballocations.
9263  for(size_t index = freeSuballocCount; index--; )
9264  {
9265  if(CheckAllocation(
9266  currentFrameIndex,
9267  frameInUseCount,
9268  bufferImageGranularity,
9269  allocSize,
9270  allocAlignment,
9271  allocType,
9272  m_FreeSuballocationsBySize[index],
9273  false, // canMakeOtherLost
9274  &pAllocationRequest->offset,
9275  &pAllocationRequest->itemsToMakeLostCount,
9276  &pAllocationRequest->sumFreeSize,
9277  &pAllocationRequest->sumItemSize))
9278  {
9279  pAllocationRequest->item = m_FreeSuballocationsBySize[index];
9280  return true;
9281  }
9282  }
9283  }
9284  }
9285 
9286  if(canMakeOtherLost)
9287  {
9288  // Brute-force algorithm. TODO: Come up with something better.
9289 
9290  bool found = false;
9291  VmaAllocationRequest tmpAllocRequest = {};
9292  tmpAllocRequest.type = VmaAllocationRequestType::Normal;
9293  for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin();
9294  suballocIt != m_Suballocations.end();
9295  ++suballocIt)
9296  {
9297  if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE ||
9298  suballocIt->hAllocation->CanBecomeLost())
9299  {
9300  if(CheckAllocation(
9301  currentFrameIndex,
9302  frameInUseCount,
9303  bufferImageGranularity,
9304  allocSize,
9305  allocAlignment,
9306  allocType,
9307  suballocIt,
9308  canMakeOtherLost,
9309  &tmpAllocRequest.offset,
9310  &tmpAllocRequest.itemsToMakeLostCount,
9311  &tmpAllocRequest.sumFreeSize,
9312  &tmpAllocRequest.sumItemSize))
9313  {
9314  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
9315  {
9316  *pAllocationRequest = tmpAllocRequest;
9317  pAllocationRequest->item = suballocIt;
9318  break;
9319  }
9320  if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost())
9321  {
9322  *pAllocationRequest = tmpAllocRequest;
9323  pAllocationRequest->item = suballocIt;
9324  found = true;
9325  }
9326  }
9327  }
9328  }
9329 
9330  return found;
9331  }
9332 
9333  return false;
9334 }
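/*
Worked example of the three strategy branches above. Free ranges registered by
size: [64, 128, 512, 1024]; request: allocSize = 100, VMA_DEBUG_MARGIN = 0.
- BEST_FIT: binary-search the size-sorted array for the first range with
  size >= 100 and probe upward from there -> picks the 128-byte range.
- MIN_OFFSET (internal strategy): walk m_Suballocations in address order and
  take the first free range that fits, regardless of its size.
- WORST_FIT / FIRST_FIT: iterate the size-sorted array from the biggest range
  downward -> picks the 1024-byte range first.
*/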
9335 
9336 bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost(
9337  uint32_t currentFrameIndex,
9338  uint32_t frameInUseCount,
9339  VmaAllocationRequest* pAllocationRequest)
9340 {
9341  VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal);
9342 
9343  while(pAllocationRequest->itemsToMakeLostCount > 0)
9344  {
9345  if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE)
9346  {
9347  ++pAllocationRequest->item;
9348  }
9349  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9350  VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE);
9351  VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost());
9352  if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9353  {
9354  pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item);
9355  --pAllocationRequest->itemsToMakeLostCount;
9356  }
9357  else
9358  {
9359  return false;
9360  }
9361  }
9362 
9363  VMA_HEAVY_ASSERT(Validate());
9364  VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end());
9365  VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE);
9366 
9367  return true;
9368 }
9369 
9370 uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
9371 {
9372  uint32_t lostAllocationCount = 0;
9373  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9374  it != m_Suballocations.end();
9375  ++it)
9376  {
9377  if(it->type != VMA_SUBALLOCATION_TYPE_FREE &&
9378  it->hAllocation->CanBecomeLost() &&
9379  it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
9380  {
9381  it = FreeSuballocation(it);
9382  ++lostAllocationCount;
9383  }
9384  }
9385  return lostAllocationCount;
9386 }
9387 
9388 VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
9389 {
9390  for(VmaSuballocationList::iterator it = m_Suballocations.begin();
9391  it != m_Suballocations.end();
9392  ++it)
9393  {
9394  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
9395  {
9396  if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN))
9397  {
9398  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
9399  return VK_ERROR_VALIDATION_FAILED_EXT;
9400  }
9401  if(!VmaValidateMagicValue(pBlockData, it->offset + it->size))
9402  {
9403  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
9404  return VK_ERROR_VALIDATION_FAILED_EXT;
9405  }
9406  }
9407  }
9408 
9409  return VK_SUCCESS;
9410 }
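/*
A minimal sketch of the margin scheme this check relies on: when
VMA_DEBUG_MARGIN > 0 and corruption detection is enabled, each allocation is
surrounded by margins filled with a known 32-bit pattern, and CheckCorruption
re-validates those words. Helper shape (a hypothetical stand-in for
VmaValidateMagicValue):

    #include <cstddef>
    #include <cstdint>

    static bool ValidateMagic(const void* pData, size_t offset,
                              size_t numBytes, uint32_t magic)
    {
        const uint32_t* p = reinterpret_cast<const uint32_t*>(
            static_cast<const char*>(pData) + offset);
        for(size_t i = 0; i < numBytes / sizeof(uint32_t); ++i)
        {
            if(p[i] != magic) // A stray write past an allocation lands here.
                return false;
        }
        return true;
    }
*/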
9411 
9412 void VmaBlockMetadata_Generic::Alloc(
9413  const VmaAllocationRequest& request,
9414  VmaSuballocationType type,
9415  VkDeviceSize allocSize,
9416  VmaAllocation hAllocation)
9417 {
9418  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
9419  VMA_ASSERT(request.item != m_Suballocations.end());
9420  VmaSuballocation& suballoc = *request.item;
9421  // Given suballocation is a free block.
9422  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9423  // Given offset is inside this suballocation.
9424  VMA_ASSERT(request.offset >= suballoc.offset);
9425  const VkDeviceSize paddingBegin = request.offset - suballoc.offset;
9426  VMA_ASSERT(suballoc.size >= paddingBegin + allocSize);
9427  const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize;
9428 
9429  // Unregister this free suballocation from m_FreeSuballocationsBySize and update
9430  // it to become used.
9431  UnregisterFreeSuballocation(request.item);
9432 
9433  suballoc.offset = request.offset;
9434  suballoc.size = allocSize;
9435  suballoc.type = type;
9436  suballoc.hAllocation = hAllocation;
9437 
9438  // If there are any free bytes remaining at the end, insert new free suballocation after current one.
9439  if(paddingEnd)
9440  {
9441  VmaSuballocation paddingSuballoc = {};
9442  paddingSuballoc.offset = request.offset + allocSize;
9443  paddingSuballoc.size = paddingEnd;
9444  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9445  VmaSuballocationList::iterator next = request.item;
9446  ++next;
9447  const VmaSuballocationList::iterator paddingEndItem =
9448  m_Suballocations.insert(next, paddingSuballoc);
9449  RegisterFreeSuballocation(paddingEndItem);
9450  }
9451 
9452  // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
9453  if(paddingBegin)
9454  {
9455  VmaSuballocation paddingSuballoc = {};
9456  paddingSuballoc.offset = request.offset - paddingBegin;
9457  paddingSuballoc.size = paddingBegin;
9458  paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9459  const VmaSuballocationList::iterator paddingBeginItem =
9460  m_Suballocations.insert(request.item, paddingSuballoc);
9461  RegisterFreeSuballocation(paddingBeginItem);
9462  }
9463 
9464  // Update totals.
9465  m_FreeCount = m_FreeCount - 1;
9466  if(paddingBegin > 0)
9467  {
9468  ++m_FreeCount;
9469  }
9470  if(paddingEnd > 0)
9471  {
9472  ++m_FreeCount;
9473  }
9474  m_SumFreeSize -= allocSize;
9475 }
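/*
What Alloc() does to the chosen free range, in picture form:

    before:  |<------------------ free (request.item) ---------------->|
    after:   |<paddingBegin>|<-------- allocation -------->|<paddingEnd>|
               new FREE item   request.item, now used        new FREE item

Non-zero padding pieces are inserted as FREE suballocations and registered in
m_FreeSuballocationsBySize; m_FreeCount loses the consumed range and regains
one per surviving padding piece, and m_SumFreeSize drops by exactly allocSize.
*/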
9476 
9477 void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation)
9478 {
9479  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9480  suballocItem != m_Suballocations.end();
9481  ++suballocItem)
9482  {
9483  VmaSuballocation& suballoc = *suballocItem;
9484  if(suballoc.hAllocation == allocation)
9485  {
9486  FreeSuballocation(suballocItem);
9487  VMA_HEAVY_ASSERT(Validate());
9488  return;
9489  }
9490  }
9491  VMA_ASSERT(0 && "Not found!");
9492 }
9493 
9494 void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset)
9495 {
9496  for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin();
9497  suballocItem != m_Suballocations.end();
9498  ++suballocItem)
9499  {
9500  VmaSuballocation& suballoc = *suballocItem;
9501  if(suballoc.offset == offset)
9502  {
9503  FreeSuballocation(suballocItem);
9504  return;
9505  }
9506  }
9507  VMA_ASSERT(0 && "Not found!");
9508 }
9509 
9510 bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
9511 {
9512  VkDeviceSize lastSize = 0;
9513  for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
9514  {
9515  const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
9516 
9517  VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
9518  VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
9519  VMA_VALIDATE(it->size >= lastSize);
9520  lastSize = it->size;
9521  }
9522  return true;
9523 }
9524 
9525 bool VmaBlockMetadata_Generic::CheckAllocation(
9526  uint32_t currentFrameIndex,
9527  uint32_t frameInUseCount,
9528  VkDeviceSize bufferImageGranularity,
9529  VkDeviceSize allocSize,
9530  VkDeviceSize allocAlignment,
9531  VmaSuballocationType allocType,
9532  VmaSuballocationList::const_iterator suballocItem,
9533  bool canMakeOtherLost,
9534  VkDeviceSize* pOffset,
9535  size_t* itemsToMakeLostCount,
9536  VkDeviceSize* pSumFreeSize,
9537  VkDeviceSize* pSumItemSize) const
9538 {
9539  VMA_ASSERT(allocSize > 0);
9540  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
9541  VMA_ASSERT(suballocItem != m_Suballocations.cend());
9542  VMA_ASSERT(pOffset != VMA_NULL);
9543 
9544  *itemsToMakeLostCount = 0;
9545  *pSumFreeSize = 0;
9546  *pSumItemSize = 0;
9547 
9548  if(canMakeOtherLost)
9549  {
9550  if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9551  {
9552  *pSumFreeSize = suballocItem->size;
9553  }
9554  else
9555  {
9556  if(suballocItem->hAllocation->CanBecomeLost() &&
9557  suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9558  {
9559  ++*itemsToMakeLostCount;
9560  *pSumItemSize = suballocItem->size;
9561  }
9562  else
9563  {
9564  return false;
9565  }
9566  }
9567 
9568  // Remaining size is too small for this request: Early return.
9569  if(GetSize() - suballocItem->offset < allocSize)
9570  {
9571  return false;
9572  }
9573 
9574  // Start from offset equal to beginning of this suballocation.
9575  *pOffset = suballocItem->offset;
9576 
9577  // Apply VMA_DEBUG_MARGIN at the beginning.
9578  if(VMA_DEBUG_MARGIN > 0)
9579  {
9580  *pOffset += VMA_DEBUG_MARGIN;
9581  }
9582 
9583  // Apply alignment.
9584  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9585 
9586  // Check previous suballocations for BufferImageGranularity conflicts.
9587  // Make bigger alignment if necessary.
9588  if(bufferImageGranularity > 1)
9589  {
9590  bool bufferImageGranularityConflict = false;
9591  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9592  while(prevSuballocItem != m_Suballocations.cbegin())
9593  {
9594  --prevSuballocItem;
9595  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9596  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9597  {
9598  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9599  {
9600  bufferImageGranularityConflict = true;
9601  break;
9602  }
9603  }
9604  else
9605  // Already on previous page.
9606  break;
9607  }
9608  if(bufferImageGranularityConflict)
9609  {
9610  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9611  }
9612  }
9613 
9614  // Now that we have final *pOffset, check if we are past suballocItem.
9615  // If yes, return false - this function should be called for another suballocItem as starting point.
9616  if(*pOffset >= suballocItem->offset + suballocItem->size)
9617  {
9618  return false;
9619  }
9620 
9621  // Calculate padding at the beginning based on current offset.
9622  const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset;
9623 
9624  // Calculate required margin at the end.
9625  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9626 
9627  const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin;
9628  // Another early return check.
9629  if(suballocItem->offset + totalSize > GetSize())
9630  {
9631  return false;
9632  }
9633 
9634  // Advance lastSuballocItem until desired size is reached.
9635  // Update itemsToMakeLostCount.
9636  VmaSuballocationList::const_iterator lastSuballocItem = suballocItem;
9637  if(totalSize > suballocItem->size)
9638  {
9639  VkDeviceSize remainingSize = totalSize - suballocItem->size;
9640  while(remainingSize > 0)
9641  {
9642  ++lastSuballocItem;
9643  if(lastSuballocItem == m_Suballocations.cend())
9644  {
9645  return false;
9646  }
9647  if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9648  {
9649  *pSumFreeSize += lastSuballocItem->size;
9650  }
9651  else
9652  {
9653  VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE);
9654  if(lastSuballocItem->hAllocation->CanBecomeLost() &&
9655  lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9656  {
9657  ++*itemsToMakeLostCount;
9658  *pSumItemSize += lastSuballocItem->size;
9659  }
9660  else
9661  {
9662  return false;
9663  }
9664  }
9665  remainingSize = (lastSuballocItem->size < remainingSize) ?
9666  remainingSize - lastSuballocItem->size : 0;
9667  }
9668  }
9669 
9670  // Check next suballocations for BufferImageGranularity conflicts.
9671  // If conflict exists, we must mark more allocations lost or fail.
9672  if(bufferImageGranularity > 1)
9673  {
9674  VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem;
9675  ++nextSuballocItem;
9676  while(nextSuballocItem != m_Suballocations.cend())
9677  {
9678  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9679  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9680  {
9681  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9682  {
9683  VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE);
9684  if(nextSuballoc.hAllocation->CanBecomeLost() &&
9685  nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
9686  {
9687  ++*itemsToMakeLostCount;
9688  }
9689  else
9690  {
9691  return false;
9692  }
9693  }
9694  }
9695  else
9696  {
9697  // Already on next page.
9698  break;
9699  }
9700  ++nextSuballocItem;
9701  }
9702  }
9703  }
9704  else
9705  {
9706  const VmaSuballocation& suballoc = *suballocItem;
9707  VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
9708 
9709  *pSumFreeSize = suballoc.size;
9710 
9711  // Size of this suballocation is too small for this request: Early return.
9712  if(suballoc.size < allocSize)
9713  {
9714  return false;
9715  }
9716 
9717  // Start from offset equal to beginning of this suballocation.
9718  *pOffset = suballoc.offset;
9719 
9720  // Apply VMA_DEBUG_MARGIN at the beginning.
9721  if(VMA_DEBUG_MARGIN > 0)
9722  {
9723  *pOffset += VMA_DEBUG_MARGIN;
9724  }
9725 
9726  // Apply alignment.
9727  *pOffset = VmaAlignUp(*pOffset, allocAlignment);
9728 
9729  // Check previous suballocations for BufferImageGranularity conflicts.
9730  // Make bigger alignment if necessary.
9731  if(bufferImageGranularity > 1)
9732  {
9733  bool bufferImageGranularityConflict = false;
9734  VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
9735  while(prevSuballocItem != m_Suballocations.cbegin())
9736  {
9737  --prevSuballocItem;
9738  const VmaSuballocation& prevSuballoc = *prevSuballocItem;
9739  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity))
9740  {
9741  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
9742  {
9743  bufferImageGranularityConflict = true;
9744  break;
9745  }
9746  }
9747  else
9748  // Already on previous page.
9749  break;
9750  }
9751  if(bufferImageGranularityConflict)
9752  {
9753  *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity);
9754  }
9755  }
9756 
9757  // Calculate padding at the beginning based on current offset.
9758  const VkDeviceSize paddingBegin = *pOffset - suballoc.offset;
9759 
9760  // Calculate required margin at the end.
9761  const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN;
9762 
9763  // Fail if requested size plus margin before and after is bigger than size of this suballocation.
9764  if(paddingBegin + allocSize + requiredEndMargin > suballoc.size)
9765  {
9766  return false;
9767  }
9768 
9769  // Check next suballocations for BufferImageGranularity conflicts.
9770  // If conflict exists, allocation cannot be made here.
9771  if(bufferImageGranularity > 1)
9772  {
9773  VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
9774  ++nextSuballocItem;
9775  while(nextSuballocItem != m_Suballocations.cend())
9776  {
9777  const VmaSuballocation& nextSuballoc = *nextSuballocItem;
9778  if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
9779  {
9780  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
9781  {
9782  return false;
9783  }
9784  }
9785  else
9786  {
9787  // Already on next page.
9788  break;
9789  }
9790  ++nextSuballocItem;
9791  }
9792  }
9793  }
9794 
9795  // All tests passed: Success. pOffset is already filled.
9796  return true;
9797 }
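/*
Worked example of the offset math in the simple (no canMakeOtherLost) path:
free suballoc at offset 100 with size 300; VMA_DEBUG_MARGIN = 16,
allocAlignment = 64, allocSize = 200, no granularity conflicts.

    *pOffset      = 100 + 16 = 116            (debug margin applied)
    *pOffset      = VmaAlignUp(116, 64) = 128
    paddingBegin  = 128 - 100 = 28
    needed        = 28 + 200 + 16 = 244 <= 300  -> success, *pOffset = 128
*/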
9798 
9799 void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
9800 {
9801  VMA_ASSERT(item != m_Suballocations.end());
9802  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9803 
9804  VmaSuballocationList::iterator nextItem = item;
9805  ++nextItem;
9806  VMA_ASSERT(nextItem != m_Suballocations.end());
9807  VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
9808 
9809  item->size += nextItem->size;
9810  --m_FreeCount;
9811  m_Suballocations.erase(nextItem);
9812 }
9813 
9814 VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
9815 {
9816  // Change this suballocation to be marked as free.
9817  VmaSuballocation& suballoc = *suballocItem;
9818  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
9819  suballoc.hAllocation = VK_NULL_HANDLE;
9820 
9821  // Update totals.
9822  ++m_FreeCount;
9823  m_SumFreeSize += suballoc.size;
9824 
9825  // Merge with previous and/or next suballocation if it's also free.
9826  bool mergeWithNext = false;
9827  bool mergeWithPrev = false;
9828 
9829  VmaSuballocationList::iterator nextItem = suballocItem;
9830  ++nextItem;
9831  if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
9832  {
9833  mergeWithNext = true;
9834  }
9835 
9836  VmaSuballocationList::iterator prevItem = suballocItem;
9837  if(suballocItem != m_Suballocations.begin())
9838  {
9839  --prevItem;
9840  if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
9841  {
9842  mergeWithPrev = true;
9843  }
9844  }
9845 
9846  if(mergeWithNext)
9847  {
9848  UnregisterFreeSuballocation(nextItem);
9849  MergeFreeWithNext(suballocItem);
9850  }
9851 
9852  if(mergeWithPrev)
9853  {
9854  UnregisterFreeSuballocation(prevItem);
9855  MergeFreeWithNext(prevItem);
9856  RegisterFreeSuballocation(prevItem);
9857  return prevItem;
9858  }
9859  else
9860  {
9861  RegisterFreeSuballocation(suballocItem);
9862  return suballocItem;
9863  }
9864 }
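/*
Merge cases handled above (F = free neighbor, U = used neighbor, X = item
being freed):

    U X U  ->  U F U     no merge
    U X F  ->  U F       mergeWithNext
    F X U  ->  F U       mergeWithPrev (returns prevItem)
    F X F  ->  F         both; next is merged first so that prevItem then
                         absorbs the already-combined range in one step

Neighbors are unregistered from m_FreeSuballocationsBySize before merging and
the surviving range is re-registered with its new size.
*/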
9865 
9866 void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
9867 {
9868  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9869  VMA_ASSERT(item->size > 0);
9870 
9871  // You may want to enable this validation at the beginning or at the end of
9872  // this function, depending on what you want to check.
9873  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9874 
9875  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9876  {
9877  if(m_FreeSuballocationsBySize.empty())
9878  {
9879  m_FreeSuballocationsBySize.push_back(item);
9880  }
9881  else
9882  {
9883  VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
9884  }
9885  }
9886 
9887  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9888 }
9889 
9890 
9891 void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
9892 {
9893  VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
9894  VMA_ASSERT(item->size > 0);
9895 
9896  // You may want to enable this validation at the beginning or at the end of
9897  // this function, depending on what you want to check.
9898  VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9899 
9900  if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
9901  {
9902  VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
9903  m_FreeSuballocationsBySize.data(),
9904  m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
9905  item,
9906  VmaSuballocationItemSizeLess());
9907  for(size_t index = it - m_FreeSuballocationsBySize.data();
9908  index < m_FreeSuballocationsBySize.size();
9909  ++index)
9910  {
9911  if(m_FreeSuballocationsBySize[index] == item)
9912  {
9913  VmaVectorRemove(m_FreeSuballocationsBySize, index);
9914  return;
9915  }
9916  VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
9917  }
9918  VMA_ASSERT(0 && "Not found.");
9919  }
9920 
9921  //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
9922 }
9923 
9924 bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
9925  VkDeviceSize bufferImageGranularity,
9926  VmaSuballocationType& inOutPrevSuballocType) const
9927 {
9928  if(bufferImageGranularity == 1 || IsEmpty())
9929  {
9930  return false;
9931  }
9932 
9933  VkDeviceSize minAlignment = VK_WHOLE_SIZE;
9934  bool typeConflictFound = false;
9935  for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
9936  it != m_Suballocations.cend();
9937  ++it)
9938  {
9939  const VmaSuballocationType suballocType = it->type;
9940  if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
9941  {
9942  minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
9943  if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
9944  {
9945  typeConflictFound = true;
9946  }
9947  inOutPrevSuballocType = suballocType;
9948  }
9949  }
9950 
9951  return typeConflictFound || minAlignment >= bufferImageGranularity;
9952 }
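/*
Example of the conflict this guards against: with bufferImageGranularity =
4096, a BUFFER ending at offset 5000 and an IMAGE_OPTIMAL placed at offset
6000 would share the page [4096, 8192) and alias illegally; the image must
instead be aligned up to 8192. Roughly, linear resources (buffers, linear
images) only conflict with optimal-tiling images on the same page, never with
each other.
*/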
9953 
9954 ////////////////////////////////////////////////////////////////////////////////
9955 // class VmaBlockMetadata_Linear
9956 
9957 VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
9958  VmaBlockMetadata(hAllocator),
9959  m_SumFreeSize(0),
9960  m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9961  m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
9962  m_1stVectorIndex(0),
9963  m_2ndVectorMode(SECOND_VECTOR_EMPTY),
9964  m_1stNullItemsBeginCount(0),
9965  m_1stNullItemsMiddleCount(0),
9966  m_2ndNullItemsCount(0)
9967 {
9968 }
9969 
9970 VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
9971 {
9972 }
9973 
9974 void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
9975 {
9976  VmaBlockMetadata::Init(size);
9977  m_SumFreeSize = size;
9978 }
9979 
9980 bool VmaBlockMetadata_Linear::Validate() const
9981 {
9982  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
9983  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
9984 
9985  VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
9986  VMA_VALIDATE(!suballocations1st.empty() ||
9987  suballocations2nd.empty() ||
9988  m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
9989 
9990  if(!suballocations1st.empty())
9991  {
9992  // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
9993  VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
9994  // Null item at the end should be just pop_back().
9995  VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
9996  }
9997  if(!suballocations2nd.empty())
9998  {
9999  // Null item at the end should be just pop_back().
10000  VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE);
10001  }
10002 
10003  VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
10004  VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
10005 
10006  VkDeviceSize sumUsedSize = 0;
10007  const size_t suballoc1stCount = suballocations1st.size();
10008  VkDeviceSize offset = VMA_DEBUG_MARGIN;
10009 
10010  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10011  {
10012  const size_t suballoc2ndCount = suballocations2nd.size();
10013  size_t nullItem2ndCount = 0;
10014  for(size_t i = 0; i < suballoc2ndCount; ++i)
10015  {
10016  const VmaSuballocation& suballoc = suballocations2nd[i];
10017  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10018 
10019  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10020  VMA_VALIDATE(suballoc.offset >= offset);
10021 
10022  if(!currFree)
10023  {
10024  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10025  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10026  sumUsedSize += suballoc.size;
10027  }
10028  else
10029  {
10030  ++nullItem2ndCount;
10031  }
10032 
10033  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10034  }
10035 
10036  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10037  }
10038 
10039  for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
10040  {
10041  const VmaSuballocation& suballoc = suballocations1st[i];
10042  VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
10043  suballoc.hAllocation == VK_NULL_HANDLE);
10044  }
10045 
10046  size_t nullItem1stCount = m_1stNullItemsBeginCount;
10047 
10048  for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
10049  {
10050  const VmaSuballocation& suballoc = suballocations1st[i];
10051  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10052 
10053  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10054  VMA_VALIDATE(suballoc.offset >= offset);
10055  VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
10056 
10057  if(!currFree)
10058  {
10059  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10060  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10061  sumUsedSize += suballoc.size;
10062  }
10063  else
10064  {
10065  ++nullItem1stCount;
10066  }
10067 
10068  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10069  }
10070  VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
10071 
10072  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10073  {
10074  const size_t suballoc2ndCount = suballocations2nd.size();
10075  size_t nullItem2ndCount = 0;
10076  for(size_t i = suballoc2ndCount; i--; )
10077  {
10078  const VmaSuballocation& suballoc = suballocations2nd[i];
10079  const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
10080 
10081  VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE));
10082  VMA_VALIDATE(suballoc.offset >= offset);
10083 
10084  if(!currFree)
10085  {
10086  VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset);
10087  VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size);
10088  sumUsedSize += suballoc.size;
10089  }
10090  else
10091  {
10092  ++nullItem2ndCount;
10093  }
10094 
10095  offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN;
10096  }
10097 
10098  VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
10099  }
10100 
10101  VMA_VALIDATE(offset <= GetSize());
10102  VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
10103 
10104  return true;
10105 }
10106 
10107 size_t VmaBlockMetadata_Linear::GetAllocationCount() const
10108 {
10109  return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) +
10110  AccessSuballocations2nd().size() - m_2ndNullItemsCount;
10111 }
10112 
10113 VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const
10114 {
10115  const VkDeviceSize size = GetSize();
10116 
10117  /*
10118  We don't consider gaps inside allocation vectors with freed allocations because
10119  they are not suitable for reuse in a linear allocator. We consider only space that
10120  is available for new allocations.
10121  */
10122  if(IsEmpty())
10123  {
10124  return size;
10125  }
10126 
10127  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10128 
10129  switch(m_2ndVectorMode)
10130  {
10131  case SECOND_VECTOR_EMPTY:
10132  /*
10133  Available space is after end of 1st, as well as before beginning of 1st (which
10134  would make it a ring buffer).
10135  */
10136  {
10137  const size_t suballocations1stCount = suballocations1st.size();
10138  VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
10139  const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
10140  const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
10141  return VMA_MAX(
10142  firstSuballoc.offset,
10143  size - (lastSuballoc.offset + lastSuballoc.size));
10144  }
10145  break;
10146 
10147  case SECOND_VECTOR_RING_BUFFER:
10148  /*
10149  Available space is only between end of 2nd and beginning of 1st.
10150  */
10151  {
10152  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10153  const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
10154  const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
10155  return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
10156  }
10157  break;
10158 
10159  case SECOND_VECTOR_DOUBLE_STACK:
10160  /*
10161  Available space is only between end of 1st and top of 2nd.
10162  */
10163  {
10164  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10165  const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
10166  const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
10167  return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
10168  }
10169  break;
10170 
10171  default:
10172  VMA_ASSERT(0);
10173  return 0;
10174  }
10175 }
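/*
The three layouts distinguished above, sketched (1 = 1st vector allocations,
2 = 2nd vector allocations, . = free space):

    SECOND_VECTOR_EMPTY:        |....11111111........|  usable space at either
                                                        end; the larger wins
    SECOND_VECTOR_RING_BUFFER:  |2222......1111111111|  2nd grows from offset 0
                                                        toward 1st's beginning
    SECOND_VECTOR_DOUBLE_STACK: |1111111........22222|  2nd grows down from the
                                                        top; the middle gap is
                                                        the only usable space
*/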
10176 
10177 void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
10178 {
10179  const VkDeviceSize size = GetSize();
10180  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10181  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10182  const size_t suballoc1stCount = suballocations1st.size();
10183  const size_t suballoc2ndCount = suballocations2nd.size();
10184 
10185  outInfo.blockCount = 1;
10186  outInfo.allocationCount = (uint32_t)GetAllocationCount();
10187  outInfo.unusedRangeCount = 0;
10188  outInfo.usedBytes = 0;
10189  outInfo.allocationSizeMin = UINT64_MAX;
10190  outInfo.allocationSizeMax = 0;
10191  outInfo.unusedRangeSizeMin = UINT64_MAX;
10192  outInfo.unusedRangeSizeMax = 0;
10193 
10194  VkDeviceSize lastOffset = 0;
10195 
10196  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10197  {
10198  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10199  size_t nextAlloc2ndIndex = 0;
10200  while(lastOffset < freeSpace2ndTo1stEnd)
10201  {
10202  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10203  while(nextAlloc2ndIndex < suballoc2ndCount &&
10204  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10205  {
10206  ++nextAlloc2ndIndex;
10207  }
10208 
10209  // Found non-null allocation.
10210  if(nextAlloc2ndIndex < suballoc2ndCount)
10211  {
10212  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10213 
10214  // 1. Process free space before this allocation.
10215  if(lastOffset < suballoc.offset)
10216  {
10217  // There is free space from lastOffset to suballoc.offset.
10218  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10219  ++outInfo.unusedRangeCount;
10220  outInfo.unusedBytes += unusedRangeSize;
10221  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10222  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10223  }
10224 
10225  // 2. Process this allocation.
10226  // There is allocation with suballoc.offset, suballoc.size.
10227  outInfo.usedBytes += suballoc.size;
10228  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10229  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10230 
10231  // 3. Prepare for next iteration.
10232  lastOffset = suballoc.offset + suballoc.size;
10233  ++nextAlloc2ndIndex;
10234  }
10235  // We are at the end.
10236  else
10237  {
10238  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10239  if(lastOffset < freeSpace2ndTo1stEnd)
10240  {
10241  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10242  ++outInfo.unusedRangeCount;
10243  outInfo.unusedBytes += unusedRangeSize;
10244  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10245  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10246  }
10247 
10248  // End of loop.
10249  lastOffset = freeSpace2ndTo1stEnd;
10250  }
10251  }
10252  }
10253 
10254  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10255  const VkDeviceSize freeSpace1stTo2ndEnd =
10256  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10257  while(lastOffset < freeSpace1stTo2ndEnd)
10258  {
10259  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10260  while(nextAlloc1stIndex < suballoc1stCount &&
10261  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10262  {
10263  ++nextAlloc1stIndex;
10264  }
10265 
10266  // Found non-null allocation.
10267  if(nextAlloc1stIndex < suballoc1stCount)
10268  {
10269  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10270 
10271  // 1. Process free space before this allocation.
10272  if(lastOffset < suballoc.offset)
10273  {
10274  // There is free space from lastOffset to suballoc.offset.
10275  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10276  ++outInfo.unusedRangeCount;
10277  outInfo.unusedBytes += unusedRangeSize;
10278  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10279  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10280  }
10281 
10282  // 2. Process this allocation.
10283  // There is allocation with suballoc.offset, suballoc.size.
10284  outInfo.usedBytes += suballoc.size;
10285  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10286  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10287 
10288  // 3. Prepare for next iteration.
10289  lastOffset = suballoc.offset + suballoc.size;
10290  ++nextAlloc1stIndex;
10291  }
10292  // We are at the end.
10293  else
10294  {
10295  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10296  if(lastOffset < freeSpace1stTo2ndEnd)
10297  {
10298  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10299  ++outInfo.unusedRangeCount;
10300  outInfo.unusedBytes += unusedRangeSize;
10301  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10302  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10303  }
10304 
10305  // End of loop.
10306  lastOffset = freeSpace1stTo2ndEnd;
10307  }
10308  }
10309 
10310  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10311  {
10312  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10313  while(lastOffset < size)
10314  {
10315  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10316  while(nextAlloc2ndIndex != SIZE_MAX &&
10317  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10318  {
10319  --nextAlloc2ndIndex;
10320  }
10321 
10322  // Found non-null allocation.
10323  if(nextAlloc2ndIndex != SIZE_MAX)
10324  {
10325  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10326 
10327  // 1. Process free space before this allocation.
10328  if(lastOffset < suballoc.offset)
10329  {
10330  // There is free space from lastOffset to suballoc.offset.
10331  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10332  ++outInfo.unusedRangeCount;
10333  outInfo.unusedBytes += unusedRangeSize;
10334  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10335  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10336  }
10337 
10338  // 2. Process this allocation.
10339  // There is allocation with suballoc.offset, suballoc.size.
10340  outInfo.usedBytes += suballoc.size;
10341  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
10342  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
10343 
10344  // 3. Prepare for next iteration.
10345  lastOffset = suballoc.offset + suballoc.size;
10346  --nextAlloc2ndIndex;
10347  }
10348  // We are at the end.
10349  else
10350  {
10351  // There is free space from lastOffset to size.
10352  if(lastOffset < size)
10353  {
10354  const VkDeviceSize unusedRangeSize = size - lastOffset;
10355  ++outInfo.unusedRangeCount;
10356  outInfo.unusedBytes += unusedRangeSize;
10357  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
10358  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
10359  }
10360 
10361  // End of loop.
10362  lastOffset = size;
10363  }
10364  }
10365  }
10366 
10367  outInfo.unusedBytes = size - outInfo.usedBytes;
10368 }
10369 
10370 void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
10371 {
10372  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10373  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10374  const VkDeviceSize size = GetSize();
10375  const size_t suballoc1stCount = suballocations1st.size();
10376  const size_t suballoc2ndCount = suballocations2nd.size();
10377 
10378  inoutStats.size += size;
10379 
10380  VkDeviceSize lastOffset = 0;
10381 
10382  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10383  {
10384  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10385  size_t nextAlloc2ndIndex = 0;
10386  while(lastOffset < freeSpace2ndTo1stEnd)
10387  {
10388  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10389  while(nextAlloc2ndIndex < suballoc2ndCount &&
10390  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10391  {
10392  ++nextAlloc2ndIndex;
10393  }
10394 
10395  // Found non-null allocation.
10396  if(nextAlloc2ndIndex < suballoc2ndCount)
10397  {
10398  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10399 
10400  // 1. Process free space before this allocation.
10401  if(lastOffset < suballoc.offset)
10402  {
10403  // There is free space from lastOffset to suballoc.offset.
10404  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10405  inoutStats.unusedSize += unusedRangeSize;
10406  ++inoutStats.unusedRangeCount;
10407  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10408  }
10409 
10410  // 2. Process this allocation.
10411  // There is allocation with suballoc.offset, suballoc.size.
10412  ++inoutStats.allocationCount;
10413 
10414  // 3. Prepare for next iteration.
10415  lastOffset = suballoc.offset + suballoc.size;
10416  ++nextAlloc2ndIndex;
10417  }
10418  // We are at the end.
10419  else
10420  {
10421  if(lastOffset < freeSpace2ndTo1stEnd)
10422  {
10423  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10424  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10425  inoutStats.unusedSize += unusedRangeSize;
10426  ++inoutStats.unusedRangeCount;
10427  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10428  }
10429 
10430  // End of loop.
10431  lastOffset = freeSpace2ndTo1stEnd;
10432  }
10433  }
10434  }
10435 
10436  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10437  const VkDeviceSize freeSpace1stTo2ndEnd =
10438  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10439  while(lastOffset < freeSpace1stTo2ndEnd)
10440  {
10441  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10442  while(nextAlloc1stIndex < suballoc1stCount &&
10443  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10444  {
10445  ++nextAlloc1stIndex;
10446  }
10447 
10448  // Found non-null allocation.
10449  if(nextAlloc1stIndex < suballoc1stCount)
10450  {
10451  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10452 
10453  // 1. Process free space before this allocation.
10454  if(lastOffset < suballoc.offset)
10455  {
10456  // There is free space from lastOffset to suballoc.offset.
10457  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10458  inoutStats.unusedSize += unusedRangeSize;
10459  ++inoutStats.unusedRangeCount;
10460  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10461  }
10462 
10463  // 2. Process this allocation.
10464  // There is allocation with suballoc.offset, suballoc.size.
10465  ++inoutStats.allocationCount;
10466 
10467  // 3. Prepare for next iteration.
10468  lastOffset = suballoc.offset + suballoc.size;
10469  ++nextAlloc1stIndex;
10470  }
10471  // We are at the end.
10472  else
10473  {
10474  if(lastOffset < freeSpace1stTo2ndEnd)
10475  {
10476  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10477  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10478  inoutStats.unusedSize += unusedRangeSize;
10479  ++inoutStats.unusedRangeCount;
10480  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10481  }
10482 
10483  // End of loop.
10484  lastOffset = freeSpace1stTo2ndEnd;
10485  }
10486  }
10487 
10488  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10489  {
10490  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10491  while(lastOffset < size)
10492  {
10493  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10494  while(nextAlloc2ndIndex != SIZE_MAX &&
10495  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10496  {
10497  --nextAlloc2ndIndex;
10498  }
10499 
10500  // Found non-null allocation.
10501  if(nextAlloc2ndIndex != SIZE_MAX)
10502  {
10503  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10504 
10505  // 1. Process free space before this allocation.
10506  if(lastOffset < suballoc.offset)
10507  {
10508  // There is free space from lastOffset to suballoc.offset.
10509  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10510  inoutStats.unusedSize += unusedRangeSize;
10511  ++inoutStats.unusedRangeCount;
10512  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10513  }
10514 
10515  // 2. Process this allocation.
10516  // There is allocation with suballoc.offset, suballoc.size.
10517  ++inoutStats.allocationCount;
10518 
10519  // 3. Prepare for next iteration.
10520  lastOffset = suballoc.offset + suballoc.size;
10521  --nextAlloc2ndIndex;
10522  }
10523  // We are at the end.
10524  else
10525  {
10526  if(lastOffset < size)
10527  {
10528  // There is free space from lastOffset to size.
10529  const VkDeviceSize unusedRangeSize = size - lastOffset;
10530  inoutStats.unusedSize += unusedRangeSize;
10531  ++inoutStats.unusedRangeCount;
10532  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
10533  }
10534 
10535  // End of loop.
10536  lastOffset = size;
10537  }
10538  }
10539  }
10540 }
10541 
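
// [Editorial sketch - not part of the original file.] AddPoolStats() above is
// what ultimately feeds the public pool-statistics query. A minimal usage
// sketch, assuming an initialized VmaAllocator 'allocator' and a custom
// VmaPool 'pool' (both hypothetical names supplied by the caller):
static VkDeviceSize ExampleQueryPoolUsedBytes(VmaAllocator allocator, VmaPool pool)
{
    VmaPoolStats stats = {};
    vmaGetPoolStats(allocator, pool, &stats);
    // 'size' is the total block space and 'unusedSize' the sum of free
    // ranges, both accumulated by AddPoolStats() above.
    return stats.size - stats.unusedSize;
}
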
10542 #if VMA_STATS_STRING_ENABLED
10543 void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
10544 {
10545  const VkDeviceSize size = GetSize();
10546  const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10547  const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10548  const size_t suballoc1stCount = suballocations1st.size();
10549  const size_t suballoc2ndCount = suballocations2nd.size();
10550 
10551  // FIRST PASS
10552 
10553  size_t unusedRangeCount = 0;
10554  VkDeviceSize usedBytes = 0;
10555 
10556  VkDeviceSize lastOffset = 0;
10557 
10558  size_t alloc2ndCount = 0;
10559  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10560  {
10561  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10562  size_t nextAlloc2ndIndex = 0;
10563  while(lastOffset < freeSpace2ndTo1stEnd)
10564  {
10565  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10566  while(nextAlloc2ndIndex < suballoc2ndCount &&
10567  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10568  {
10569  ++nextAlloc2ndIndex;
10570  }
10571 
10572  // Found non-null allocation.
10573  if(nextAlloc2ndIndex < suballoc2ndCount)
10574  {
10575  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10576 
10577  // 1. Process free space before this allocation.
10578  if(lastOffset < suballoc.offset)
10579  {
10580  // There is free space from lastOffset to suballoc.offset.
10581  ++unusedRangeCount;
10582  }
10583 
10584  // 2. Process this allocation.
10585  // There is allocation with suballoc.offset, suballoc.size.
10586  ++alloc2ndCount;
10587  usedBytes += suballoc.size;
10588 
10589  // 3. Prepare for next iteration.
10590  lastOffset = suballoc.offset + suballoc.size;
10591  ++nextAlloc2ndIndex;
10592  }
10593  // We are at the end.
10594  else
10595  {
10596  if(lastOffset < freeSpace2ndTo1stEnd)
10597  {
10598  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10599  ++unusedRangeCount;
10600  }
10601 
10602  // End of loop.
10603  lastOffset = freeSpace2ndTo1stEnd;
10604  }
10605  }
10606  }
10607 
10608  size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
10609  size_t alloc1stCount = 0;
10610  const VkDeviceSize freeSpace1stTo2ndEnd =
10611  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
10612  while(lastOffset < freeSpace1stTo2ndEnd)
10613  {
10614  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10615  while(nextAlloc1stIndex < suballoc1stCount &&
10616  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10617  {
10618  ++nextAlloc1stIndex;
10619  }
10620 
10621  // Found non-null allocation.
10622  if(nextAlloc1stIndex < suballoc1stCount)
10623  {
10624  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10625 
10626  // 1. Process free space before this allocation.
10627  if(lastOffset < suballoc.offset)
10628  {
10629  // There is free space from lastOffset to suballoc.offset.
10630  ++unusedRangeCount;
10631  }
10632 
10633  // 2. Process this allocation.
10634  // There is allocation with suballoc.offset, suballoc.size.
10635  ++alloc1stCount;
10636  usedBytes += suballoc.size;
10637 
10638  // 3. Prepare for next iteration.
10639  lastOffset = suballoc.offset + suballoc.size;
10640  ++nextAlloc1stIndex;
10641  }
10642  // We are at the end.
10643  else
10644  {
10645  if(lastOffset < freeSpace1stTo2ndEnd)
10646  {
10647  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10648  ++unusedRangeCount;
10649  }
10650 
10651  // End of loop.
10652  lastOffset = freeSpace1stTo2ndEnd;
10653  }
10654  }
10655 
10656  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10657  {
10658  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10659  while(lastOffset < size)
10660  {
10661  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10662  while(nextAlloc2ndIndex != SIZE_MAX &&
10663  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10664  {
10665  --nextAlloc2ndIndex;
10666  }
10667 
10668  // Found non-null allocation.
10669  if(nextAlloc2ndIndex != SIZE_MAX)
10670  {
10671  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10672 
10673  // 1. Process free space before this allocation.
10674  if(lastOffset < suballoc.offset)
10675  {
10676  // There is free space from lastOffset to suballoc.offset.
10677  ++unusedRangeCount;
10678  }
10679 
10680  // 2. Process this allocation.
10681  // There is allocation with suballoc.offset, suballoc.size.
10682  ++alloc2ndCount;
10683  usedBytes += suballoc.size;
10684 
10685  // 3. Prepare for next iteration.
10686  lastOffset = suballoc.offset + suballoc.size;
10687  --nextAlloc2ndIndex;
10688  }
10689  // We are at the end.
10690  else
10691  {
10692  if(lastOffset < size)
10693  {
10694  // There is free space from lastOffset to size.
10695  ++unusedRangeCount;
10696  }
10697 
10698  // End of loop.
10699  lastOffset = size;
10700  }
10701  }
10702  }
10703 
10704  const VkDeviceSize unusedBytes = size - usedBytes;
10705  PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
10706 
10707  // SECOND PASS
10708  lastOffset = 0;
10709 
10710  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10711  {
10712  const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
10713  size_t nextAlloc2ndIndex = 0;
10714  while(lastOffset < freeSpace2ndTo1stEnd)
10715  {
10716  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10717  while(nextAlloc2ndIndex < suballoc2ndCount &&
10718  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10719  {
10720  ++nextAlloc2ndIndex;
10721  }
10722 
10723  // Found non-null allocation.
10724  if(nextAlloc2ndIndex < suballoc2ndCount)
10725  {
10726  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10727 
10728  // 1. Process free space before this allocation.
10729  if(lastOffset < suballoc.offset)
10730  {
10731  // There is free space from lastOffset to suballoc.offset.
10732  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10733  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10734  }
10735 
10736  // 2. Process this allocation.
10737  // There is allocation with suballoc.offset, suballoc.size.
10738  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10739 
10740  // 3. Prepare for next iteration.
10741  lastOffset = suballoc.offset + suballoc.size;
10742  ++nextAlloc2ndIndex;
10743  }
10744  // We are at the end.
10745  else
10746  {
10747  if(lastOffset < freeSpace2ndTo1stEnd)
10748  {
10749  // There is free space from lastOffset to freeSpace2ndTo1stEnd.
10750  const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
10751  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10752  }
10753 
10754  // End of loop.
10755  lastOffset = freeSpace2ndTo1stEnd;
10756  }
10757  }
10758  }
10759 
10760  nextAlloc1stIndex = m_1stNullItemsBeginCount;
10761  while(lastOffset < freeSpace1stTo2ndEnd)
10762  {
10763  // Find next non-null allocation or move nextAlloc1stIndex to the end.
10764  while(nextAlloc1stIndex < suballoc1stCount &&
10765  suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
10766  {
10767  ++nextAlloc1stIndex;
10768  }
10769 
10770  // Found non-null allocation.
10771  if(nextAlloc1stIndex < suballoc1stCount)
10772  {
10773  const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
10774 
10775  // 1. Process free space before this allocation.
10776  if(lastOffset < suballoc.offset)
10777  {
10778  // There is free space from lastOffset to suballoc.offset.
10779  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10780  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10781  }
10782 
10783  // 2. Process this allocation.
10784  // There is allocation with suballoc.offset, suballoc.size.
10785  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10786 
10787  // 3. Prepare for next iteration.
10788  lastOffset = suballoc.offset + suballoc.size;
10789  ++nextAlloc1stIndex;
10790  }
10791  // We are at the end.
10792  else
10793  {
10794  if(lastOffset < freeSpace1stTo2ndEnd)
10795  {
10796  // There is free space from lastOffset to freeSpace1stTo2ndEnd.
10797  const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
10798  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10799  }
10800 
10801  // End of loop.
10802  lastOffset = freeSpace1stTo2ndEnd;
10803  }
10804  }
10805 
10806  if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
10807  {
10808  size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
10809  while(lastOffset < size)
10810  {
10811  // Find next non-null allocation or move nextAlloc2ndIndex to the end.
10812  while(nextAlloc2ndIndex != SIZE_MAX &&
10813  suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
10814  {
10815  --nextAlloc2ndIndex;
10816  }
10817 
10818  // Found non-null allocation.
10819  if(nextAlloc2ndIndex != SIZE_MAX)
10820  {
10821  const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
10822 
10823  // 1. Process free space before this allocation.
10824  if(lastOffset < suballoc.offset)
10825  {
10826  // There is free space from lastOffset to suballoc.offset.
10827  const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
10828  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10829  }
10830 
10831  // 2. Process this allocation.
10832  // There is allocation with suballoc.offset, suballoc.size.
10833  PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation);
10834 
10835  // 3. Prepare for next iteration.
10836  lastOffset = suballoc.offset + suballoc.size;
10837  --nextAlloc2ndIndex;
10838  }
10839  // We are at the end.
10840  else
10841  {
10842  if(lastOffset < size)
10843  {
10844  // There is free space from lastOffset to size.
10845  const VkDeviceSize unusedRangeSize = size - lastOffset;
10846  PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
10847  }
10848 
10849  // End of loop.
10850  lastOffset = size;
10851  }
10852  }
10853  }
10854 
10855  PrintDetailedMap_End(json);
10856 }
10857 #endif // #if VMA_STATS_STRING_ENABLED
10858 
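#if VMA_STATS_STRING_ENABLED
// [Editorial sketch - not part of the original file.] PrintDetailedMap()
// above is reached through the public vmaBuildStatsString() when its
// 'detailedMap' argument is VK_TRUE. A minimal dump helper (hypothetical
// name), assuming an initialized VmaAllocator 'allocator':
static void ExampleDumpDetailedMap(VmaAllocator allocator)
{
    char* statsString = VMA_NULL;
    vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE => per-allocation JSON
    // statsString now holds the JSON emitted by PrintDetailedMap_* above;
    // write it to a file or logger of your choice before freeing it.
    vmaFreeStatsString(allocator, statsString);
}
#endif // #if VMA_STATS_STRING_ENABLED
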
10859 bool VmaBlockMetadata_Linear::CreateAllocationRequest(
10860  uint32_t currentFrameIndex,
10861  uint32_t frameInUseCount,
10862  VkDeviceSize bufferImageGranularity,
10863  VkDeviceSize allocSize,
10864  VkDeviceSize allocAlignment,
10865  bool upperAddress,
10866  VmaSuballocationType allocType,
10867  bool canMakeOtherLost,
10868  uint32_t strategy,
10869  VmaAllocationRequest* pAllocationRequest)
10870 {
10871  VMA_ASSERT(allocSize > 0);
10872  VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
10873  VMA_ASSERT(pAllocationRequest != VMA_NULL);
10874  VMA_HEAVY_ASSERT(Validate());
10875  return upperAddress ?
10876  CreateAllocationRequest_UpperAddress(
10877  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10878  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) :
10879  CreateAllocationRequest_LowerAddress(
10880  currentFrameIndex, frameInUseCount, bufferImageGranularity,
10881  allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest);
10882 }
10883 
10884 bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
10885  uint32_t currentFrameIndex,
10886  uint32_t frameInUseCount,
10887  VkDeviceSize bufferImageGranularity,
10888  VkDeviceSize allocSize,
10889  VkDeviceSize allocAlignment,
10890  VmaSuballocationType allocType,
10891  bool canMakeOtherLost,
10892  uint32_t strategy,
10893  VmaAllocationRequest* pAllocationRequest)
10894 {
10895  const VkDeviceSize size = GetSize();
10896  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
10897  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
10898 
10899  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
10900  {
10901  VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
10902  return false;
10903  }
10904 
10905  // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
10906  if(allocSize > size)
10907  {
10908  return false;
10909  }
10910  VkDeviceSize resultBaseOffset = size - allocSize;
10911  if(!suballocations2nd.empty())
10912  {
10913  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
10914  resultBaseOffset = lastSuballoc.offset - allocSize;
10915  if(allocSize > lastSuballoc.offset)
10916  {
10917  return false;
10918  }
10919  }
10920 
10921  // Start from offset equal to end of free space.
10922  VkDeviceSize resultOffset = resultBaseOffset;
10923 
10924  // Apply VMA_DEBUG_MARGIN at the end.
10925  if(VMA_DEBUG_MARGIN > 0)
10926  {
10927  if(resultOffset < VMA_DEBUG_MARGIN)
10928  {
10929  return false;
10930  }
10931  resultOffset -= VMA_DEBUG_MARGIN;
10932  }
10933 
10934  // Apply alignment.
10935  resultOffset = VmaAlignDown(resultOffset, allocAlignment);
10936 
10937  // Check next suballocations from 2nd for BufferImageGranularity conflicts.
10938  // Make bigger alignment if necessary.
10939  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
10940  {
10941  bool bufferImageGranularityConflict = false;
10942  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
10943  {
10944  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
10945  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
10946  {
10947  if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
10948  {
10949  bufferImageGranularityConflict = true;
10950  break;
10951  }
10952  }
10953  else
10954  // Already on previous page.
10955  break;
10956  }
10957  if(bufferImageGranularityConflict)
10958  {
10959  resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
10960  }
10961  }
10962 
10963  // There is enough free space.
10964  const VkDeviceSize endOf1st = !suballocations1st.empty() ?
10965  suballocations1st.back().offset + suballocations1st.back().size :
10966  0;
10967  if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset)
10968  {
10969  // Check previous suballocations for BufferImageGranularity conflicts.
10970  // If conflict exists, allocation cannot be made here.
10971  if(bufferImageGranularity > 1)
10972  {
10973  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
10974  {
10975  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
10976  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
10977  {
10978  if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
10979  {
10980  return false;
10981  }
10982  }
10983  else
10984  {
10985  // Already on next page.
10986  break;
10987  }
10988  }
10989  }
10990 
10991  // All tests passed: Success.
10992  pAllocationRequest->offset = resultOffset;
10993  pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st;
10994  pAllocationRequest->sumItemSize = 0;
10995  // pAllocationRequest->item unused.
10996  pAllocationRequest->itemsToMakeLostCount = 0;
10997  pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
10998  return true;
10999  }
11000 
11001  return false;
11002 }
11003 
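// [Editorial sketch - not part of the original file.] The upper-address path
// above implements the "double stack": in a pool created with
// VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, allocations flagged with
// VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT grow down from the end of the
// block. A usage sketch; 'allocator', 'linearPool' and the buffer parameters
// are hypothetical:
static VkResult ExampleAllocFromTop(VmaAllocator allocator, VmaPool linearPool,
    VkBuffer* outBuf, VmaAllocation* outAlloc)
{
    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 65536;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.pool = linearPool;
    allocInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // routes to CreateAllocationRequest_UpperAddress()
    return vmaCreateBuffer(allocator, &bufInfo, &allocInfo, outBuf, outAlloc, VMA_NULL);
}
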
11004 bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
11005  uint32_t currentFrameIndex,
11006  uint32_t frameInUseCount,
11007  VkDeviceSize bufferImageGranularity,
11008  VkDeviceSize allocSize,
11009  VkDeviceSize allocAlignment,
11010  VmaSuballocationType allocType,
11011  bool canMakeOtherLost,
11012  uint32_t strategy,
11013  VmaAllocationRequest* pAllocationRequest)
11014 {
11015  const VkDeviceSize size = GetSize();
11016  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11017  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11018 
11019  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11020  {
11021  // Try to allocate at the end of 1st vector.
11022 
11023  VkDeviceSize resultBaseOffset = 0;
11024  if(!suballocations1st.empty())
11025  {
11026  const VmaSuballocation& lastSuballoc = suballocations1st.back();
11027  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11028  }
11029 
11030  // Start from offset equal to beginning of free space.
11031  VkDeviceSize resultOffset = resultBaseOffset;
11032 
11033  // Apply VMA_DEBUG_MARGIN at the beginning.
11034  if(VMA_DEBUG_MARGIN > 0)
11035  {
11036  resultOffset += VMA_DEBUG_MARGIN;
11037  }
11038 
11039  // Apply alignment.
11040  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11041 
11042  // Check previous suballocations for BufferImageGranularity conflicts.
11043  // Make bigger alignment if necessary.
11044  if(bufferImageGranularity > 1 && !suballocations1st.empty())
11045  {
11046  bool bufferImageGranularityConflict = false;
11047  for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
11048  {
11049  const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
11050  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11051  {
11052  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11053  {
11054  bufferImageGranularityConflict = true;
11055  break;
11056  }
11057  }
11058  else
11059  // Already on previous page.
11060  break;
11061  }
11062  if(bufferImageGranularityConflict)
11063  {
11064  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11065  }
11066  }
11067 
11068  const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
11069  suballocations2nd.back().offset : size;
11070 
11071  // There is enough free space at the end after alignment.
11072  if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd)
11073  {
11074  // Check next suballocations for BufferImageGranularity conflicts.
11075  // If conflict exists, allocation cannot be made here.
11076  if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11077  {
11078  for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
11079  {
11080  const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
11081  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11082  {
11083  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11084  {
11085  return false;
11086  }
11087  }
11088  else
11089  {
11090  // Already on previous page.
11091  break;
11092  }
11093  }
11094  }
11095 
11096  // All tests passed: Success.
11097  pAllocationRequest->offset = resultOffset;
11098  pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset;
11099  pAllocationRequest->sumItemSize = 0;
11100  // pAllocationRequest->item, customData unused.
11101  pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
11102  pAllocationRequest->itemsToMakeLostCount = 0;
11103  return true;
11104  }
11105  }
11106 
11107  // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
11108  // beginning of 1st vector as the end of free space.
11109  if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11110  {
11111  VMA_ASSERT(!suballocations1st.empty());
11112 
11113  VkDeviceSize resultBaseOffset = 0;
11114  if(!suballocations2nd.empty())
11115  {
11116  const VmaSuballocation& lastSuballoc = suballocations2nd.back();
11117  resultBaseOffset = lastSuballoc.offset + lastSuballoc.size;
11118  }
11119 
11120  // Start from offset equal to beginning of free space.
11121  VkDeviceSize resultOffset = resultBaseOffset;
11122 
11123  // Apply VMA_DEBUG_MARGIN at the beginning.
11124  if(VMA_DEBUG_MARGIN > 0)
11125  {
11126  resultOffset += VMA_DEBUG_MARGIN;
11127  }
11128 
11129  // Apply alignment.
11130  resultOffset = VmaAlignUp(resultOffset, allocAlignment);
11131 
11132  // Check previous suballocations for BufferImageGranularity conflicts.
11133  // Make bigger alignment if necessary.
11134  if(bufferImageGranularity > 1 && !suballocations2nd.empty())
11135  {
11136  bool bufferImageGranularityConflict = false;
11137  for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
11138  {
11139  const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
11140  if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
11141  {
11142  if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
11143  {
11144  bufferImageGranularityConflict = true;
11145  break;
11146  }
11147  }
11148  else
11149  // Already on previous page.
11150  break;
11151  }
11152  if(bufferImageGranularityConflict)
11153  {
11154  resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
11155  }
11156  }
11157 
11158  pAllocationRequest->itemsToMakeLostCount = 0;
11159  pAllocationRequest->sumItemSize = 0;
11160  size_t index1st = m_1stNullItemsBeginCount;
11161 
11162  if(canMakeOtherLost)
11163  {
11164  while(index1st < suballocations1st.size() &&
11165  resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset)
11166  {
11167  // Next colliding allocation at the beginning of 1st vector found. Try to make it lost.
11168  const VmaSuballocation& suballoc = suballocations1st[index1st];
11169  if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
11170  {
11171  // No problem.
11172  }
11173  else
11174  {
11175  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11176  if(suballoc.hAllocation->CanBecomeLost() &&
11177  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11178  {
11179  ++pAllocationRequest->itemsToMakeLostCount;
11180  pAllocationRequest->sumItemSize += suballoc.size;
11181  }
11182  else
11183  {
11184  return false;
11185  }
11186  }
11187  ++index1st;
11188  }
11189 
11190  // Check next suballocations for BufferImageGranularity conflicts.
11191  // If conflict exists, we must mark more allocations lost or fail.
11192  if(bufferImageGranularity > 1)
11193  {
11194  while(index1st < suballocations1st.size())
11195  {
11196  const VmaSuballocation& suballoc = suballocations1st[index1st];
11197  if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity))
11198  {
11199  if(suballoc.hAllocation != VK_NULL_HANDLE)
11200  {
11201  // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type).
11202  if(suballoc.hAllocation->CanBecomeLost() &&
11203  suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex)
11204  {
11205  ++pAllocationRequest->itemsToMakeLostCount;
11206  pAllocationRequest->sumItemSize += suballoc.size;
11207  }
11208  else
11209  {
11210  return false;
11211  }
11212  }
11213  }
11214  else
11215  {
11216  // Already on next page.
11217  break;
11218  }
11219  ++index1st;
11220  }
11221  }
11222 
11223  // Special case: There is not enough room at the end for this allocation, even after making all allocations from the 1st vector lost.
11224  if(index1st == suballocations1st.size() &&
11225  resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
11226  {
11227  // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
11228  VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
11229  }
11230  }
11231 
11232  // There is enough free space at the end after alignment.
11233  if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
11234  (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
11235  {
11236  // Check next suballocations for BufferImageGranularity conflicts.
11237  // If conflict exists, allocation cannot be made here.
11238  if(bufferImageGranularity > 1)
11239  {
11240  for(size_t nextSuballocIndex = index1st;
11241  nextSuballocIndex < suballocations1st.size();
11242  nextSuballocIndex++)
11243  {
11244  const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
11245  if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
11246  {
11247  if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
11248  {
11249  return false;
11250  }
11251  }
11252  else
11253  {
11254  // Already on next page.
11255  break;
11256  }
11257  }
11258  }
11259 
11260  // All tests passed: Success.
11261  pAllocationRequest->offset = resultOffset;
11262  pAllocationRequest->sumFreeSize =
11263  (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
11264  - resultBaseOffset
11265  - pAllocationRequest->sumItemSize;
11266  pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
11267  // pAllocationRequest->item, customData unused.
11268  return true;
11269  }
11270  }
11271 
11272  return false;
11273 }
11274 
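// [Editorial sketch - not part of the original file.] The 'canMakeOtherLost'
// path above is what lets a linear pool act as a ring buffer: stale
// allocations at the front of 1st are made lost to free up room at the end
// of 2nd. A per-frame usage sketch with hypothetical names, assuming the
// pool was created with VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT and a non-zero
// VmaPoolCreateInfo::frameInUseCount:
static VkResult ExampleRingBufferAlloc(VmaAllocator allocator, VmaPool ringPool,
    uint32_t frameIndex, VkBuffer* outBuf, VmaAllocation* outAlloc)
{
    vmaSetCurrentFrameIndex(allocator, frameIndex); // becomes 'currentFrameIndex' above

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 16384;
    bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.pool = ringPool;
    allocInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
        VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; // 'canMakeOtherLost' == true above
    return vmaCreateBuffer(allocator, &bufInfo, &allocInfo, outBuf, outAlloc, VMA_NULL);
}
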
11275 bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
11276  uint32_t currentFrameIndex,
11277  uint32_t frameInUseCount,
11278  VmaAllocationRequest* pAllocationRequest)
11279 {
11280  if(pAllocationRequest->itemsToMakeLostCount == 0)
11281  {
11282  return true;
11283  }
11284 
11285  VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
11286 
11287  // We always start from 1st.
11288  SuballocationVectorType* suballocations = &AccessSuballocations1st();
11289  size_t index = m_1stNullItemsBeginCount;
11290  size_t madeLostCount = 0;
11291  while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
11292  {
11293  if(index == suballocations->size())
11294  {
11295  index = 0;
11296  // If we get to the end of 1st, we wrap around to the beginning of 2nd (ring buffer) or back to 1st.
11297  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11298  {
11299  suballocations = &AccessSuballocations2nd();
11300  }
11301  // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
11302  // suballocations continues pointing at AccessSuballocations1st().
11303  VMA_ASSERT(!suballocations->empty());
11304  }
11305  VmaSuballocation& suballoc = (*suballocations)[index];
11306  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11307  {
11308  VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
11309  VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
11310  if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11311  {
11312  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11313  suballoc.hAllocation = VK_NULL_HANDLE;
11314  m_SumFreeSize += suballoc.size;
11315  if(suballocations == &AccessSuballocations1st())
11316  {
11317  ++m_1stNullItemsMiddleCount;
11318  }
11319  else
11320  {
11321  ++m_2ndNullItemsCount;
11322  }
11323  ++madeLostCount;
11324  }
11325  else
11326  {
11327  return false;
11328  }
11329  }
11330  ++index;
11331  }
11332 
11333  CleanupAfterFree();
11334  //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
11335 
11336  return true;
11337 }
11338 
11339 uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11340 {
11341  uint32_t lostAllocationCount = 0;
11342 
11343  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11344  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11345  {
11346  VmaSuballocation& suballoc = suballocations1st[i];
11347  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11348  suballoc.hAllocation->CanBecomeLost() &&
11349  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11350  {
11351  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11352  suballoc.hAllocation = VK_NULL_HANDLE;
11353  ++m_1stNullItemsMiddleCount;
11354  m_SumFreeSize += suballoc.size;
11355  ++lostAllocationCount;
11356  }
11357  }
11358 
11359  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11360  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11361  {
11362  VmaSuballocation& suballoc = suballocations2nd[i];
11363  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE &&
11364  suballoc.hAllocation->CanBecomeLost() &&
11365  suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
11366  {
11367  suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11368  suballoc.hAllocation = VK_NULL_HANDLE;
11369  ++m_2ndNullItemsCount;
11370  m_SumFreeSize += suballoc.size;
11371  ++lostAllocationCount;
11372  }
11373  }
11374 
11375  if(lostAllocationCount)
11376  {
11377  CleanupAfterFree();
11378  }
11379 
11380  return lostAllocationCount;
11381 }
11382 
11383 VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
11384 {
11385  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11386  for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
11387  {
11388  const VmaSuballocation& suballoc = suballocations1st[i];
11389  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11390  {
11391  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11392  {
11393  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11394  return VK_ERROR_VALIDATION_FAILED_EXT;
11395  }
11396  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11397  {
11398  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11399  return VK_ERROR_VALIDATION_FAILED_EXT;
11400  }
11401  }
11402  }
11403 
11404  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11405  for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
11406  {
11407  const VmaSuballocation& suballoc = suballocations2nd[i];
11408  if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
11409  {
11410  if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN))
11411  {
11412  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!");
11413  return VK_ERROR_VALIDATION_FAILED_EXT;
11414  }
11415  if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
11416  {
11417  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
11418  return VK_ERROR_VALIDATION_FAILED_EXT;
11419  }
11420  }
11421  }
11422 
11423  return VK_SUCCESS;
11424 }
11425 
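// [Editorial sketch - not part of the original file.] CheckCorruption() above
// verifies the magic values written into the VMA_DEBUG_MARGIN around each
// allocation, so it is only meaningful when the implementation is compiled
// with something like:
//
//   #define VMA_DEBUG_MARGIN 16
//   #define VMA_DEBUG_DETECT_CORRUPTION 1
//   #define VMA_IMPLEMENTATION
//   #include "vk_mem_alloc.h"
//
// It is reached through the public entry points; 'allocator' and 'pool' are
// hypothetical caller-supplied handles:
static void ExampleCheckCorruption(VmaAllocator allocator, VmaPool pool)
{
    // VK_ERROR_FEATURE_NOT_PRESENT means the macros above were not defined;
    // VK_ERROR_VALIDATION_FAILED_EXT signals detected corruption.
    VkResult res = vmaCheckPoolCorruption(allocator, pool);
    VMA_ASSERT(res == VK_SUCCESS || res == VK_ERROR_FEATURE_NOT_PRESENT);
}
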
11426 void VmaBlockMetadata_Linear::Alloc(
11427  const VmaAllocationRequest& request,
11428  VmaSuballocationType type,
11429  VkDeviceSize allocSize,
11430  VmaAllocation hAllocation)
11431 {
11432  const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type };
11433 
11434  switch(request.type)
11435  {
11436  case VmaAllocationRequestType::UpperAddress:
11437  {
11438  VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
11439  "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
11440  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11441  suballocations2nd.push_back(newSuballoc);
11442  m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
11443  }
11444  break;
11445  case VmaAllocationRequestType::EndOf1st:
11446  {
11447  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11448 
11449  VMA_ASSERT(suballocations1st.empty() ||
11450  request.offset >= suballocations1st.back().offset + suballocations1st.back().size);
11451  // Check if it fits before the end of the block.
11452  VMA_ASSERT(request.offset + allocSize <= GetSize());
11453 
11454  suballocations1st.push_back(newSuballoc);
11455  }
11456  break;
11457  case VmaAllocationRequestType::EndOf2nd:
11458  {
11459  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11460  // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
11461  VMA_ASSERT(!suballocations1st.empty() &&
11462  request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset);
11463  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11464 
11465  switch(m_2ndVectorMode)
11466  {
11467  case SECOND_VECTOR_EMPTY:
11468  // First allocation from second part ring buffer.
11469  VMA_ASSERT(suballocations2nd.empty());
11470  m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
11471  break;
11472  case SECOND_VECTOR_RING_BUFFER:
11473  // 2-part ring buffer is already started.
11474  VMA_ASSERT(!suballocations2nd.empty());
11475  break;
11476  case SECOND_VECTOR_DOUBLE_STACK:
11477  VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
11478  break;
11479  default:
11480  VMA_ASSERT(0);
11481  }
11482 
11483  suballocations2nd.push_back(newSuballoc);
11484  }
11485  break;
11486  default:
11487  VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
11488  }
11489 
11490  m_SumFreeSize -= newSuballoc.size;
11491 }
11492 
11493 void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation)
11494 {
11495  FreeAtOffset(allocation->GetOffset());
11496 }
11497 
11498 void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset)
11499 {
11500  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11501  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11502 
11503  if(!suballocations1st.empty())
11504  {
11505  // If it is the first allocation in 1st vector: mark it free and grow the null prefix at the beginning.
11506  VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
11507  if(firstSuballoc.offset == offset)
11508  {
11509  firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
11510  firstSuballoc.hAllocation = VK_NULL_HANDLE;
11511  m_SumFreeSize += firstSuballoc.size;
11512  ++m_1stNullItemsBeginCount;
11513  CleanupAfterFree();
11514  return;
11515  }
11516  }
11517 
11518  // Last allocation in 2-part ring buffer or top of upper stack (same logic).
11519  if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
11520  m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
11521  {
11522  VmaSuballocation& lastSuballoc = suballocations2nd.back();
11523  if(lastSuballoc.offset == offset)
11524  {
11525  m_SumFreeSize += lastSuballoc.size;
11526  suballocations2nd.pop_back();
11527  CleanupAfterFree();
11528  return;
11529  }
11530  }
11531  // Last allocation in 1st vector.
11532  else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY)
11533  {
11534  VmaSuballocation& lastSuballoc = suballocations1st.back();
11535  if(lastSuballoc.offset == offset)
11536  {
11537  m_SumFreeSize += lastSuballoc.size;
11538  suballocations1st.pop_back();
11539  CleanupAfterFree();
11540  return;
11541  }
11542  }
11543 
11544  // Item from the middle of 1st vector.
11545  {
11546  VmaSuballocation refSuballoc;
11547  refSuballoc.offset = offset;
11548  // Rest of members stays uninitialized intentionally for better performance.
11549  SuballocationVectorType::iterator it = VmaBinaryFindSorted(
11550  suballocations1st.begin() + m_1stNullItemsBeginCount,
11551  suballocations1st.end(),
11552  refSuballoc,
11553  VmaSuballocationOffsetLess());
11554  if(it != suballocations1st.end())
11555  {
11556  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11557  it->hAllocation = VK_NULL_HANDLE;
11558  ++m_1stNullItemsMiddleCount;
11559  m_SumFreeSize += it->size;
11560  CleanupAfterFree();
11561  return;
11562  }
11563  }
11564 
11565  if(m_2ndVectorMode != SECOND_VECTOR_EMPTY)
11566  {
11567  // Item from the middle of 2nd vector.
11568  VmaSuballocation refSuballoc;
11569  refSuballoc.offset = offset;
11570  // Rest of members stays uninitialized intentionally for better performance.
11571  SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
11572  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
11573  VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
11574  if(it != suballocations2nd.end())
11575  {
11576  it->type = VMA_SUBALLOCATION_TYPE_FREE;
11577  it->hAllocation = VK_NULL_HANDLE;
11578  ++m_2ndNullItemsCount;
11579  m_SumFreeSize += it->size;
11580  CleanupAfterFree();
11581  return;
11582  }
11583  }
11584 
11585  VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
11586 }
11587 
11588 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
11589 {
11590  const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11591  const size_t suballocCount = AccessSuballocations1st().size();
11592  return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
11593 }
11594 
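// [Editorial note - worked example, not part of the original file.]
// ShouldCompact1st() triggers only when free slots clearly dominate: with
// suballocCount = 100 and nullItemCount = 60 the test is 60 * 2 >= 40 * 3,
// i.e. 120 >= 120, so compaction runs; with nullItemCount = 59 it is
// 118 >= 123, so the vector is left alone. Vectors of 32 items or fewer are
// never compacted, since scanning them linearly is already cheap.
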
11595 void VmaBlockMetadata_Linear::CleanupAfterFree()
11596 {
11597  SuballocationVectorType& suballocations1st = AccessSuballocations1st();
11598  SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
11599 
11600  if(IsEmpty())
11601  {
11602  suballocations1st.clear();
11603  suballocations2nd.clear();
11604  m_1stNullItemsBeginCount = 0;
11605  m_1stNullItemsMiddleCount = 0;
11606  m_2ndNullItemsCount = 0;
11607  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11608  }
11609  else
11610  {
11611  const size_t suballoc1stCount = suballocations1st.size();
11612  const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
11613  VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
11614 
11615  // Find more null items at the beginning of 1st vector.
11616  while(m_1stNullItemsBeginCount < suballoc1stCount &&
11617  suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11618  {
11619  ++m_1stNullItemsBeginCount;
11620  --m_1stNullItemsMiddleCount;
11621  }
11622 
11623  // Find more null items at the end of 1st vector.
11624  while(m_1stNullItemsMiddleCount > 0 &&
11625  suballocations1st.back().hAllocation == VK_NULL_HANDLE)
11626  {
11627  --m_1stNullItemsMiddleCount;
11628  suballocations1st.pop_back();
11629  }
11630 
11631  // Find more null items at the end of 2nd vector.
11632  while(m_2ndNullItemsCount > 0 &&
11633  suballocations2nd.back().hAllocation == VK_NULL_HANDLE)
11634  {
11635  --m_2ndNullItemsCount;
11636  suballocations2nd.pop_back();
11637  }
11638 
11639  // Find more null items at the beginning of 2nd vector.
11640  while(m_2ndNullItemsCount > 0 &&
11641  suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
11642  {
11643  --m_2ndNullItemsCount;
11644  VmaVectorRemove(suballocations2nd, 0);
11645  }
11646 
11647  if(ShouldCompact1st())
11648  {
11649  const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
11650  size_t srcIndex = m_1stNullItemsBeginCount;
11651  for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
11652  {
11653  while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
11654  {
11655  ++srcIndex;
11656  }
11657  if(dstIndex != srcIndex)
11658  {
11659  suballocations1st[dstIndex] = suballocations1st[srcIndex];
11660  }
11661  ++srcIndex;
11662  }
11663  suballocations1st.resize(nonNullItemCount);
11664  m_1stNullItemsBeginCount = 0;
11665  m_1stNullItemsMiddleCount = 0;
11666  }
11667 
11668  // 2nd vector became empty.
11669  if(suballocations2nd.empty())
11670  {
11671  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11672  }
11673 
11674  // 1st vector became empty.
11675  if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
11676  {
11677  suballocations1st.clear();
11678  m_1stNullItemsBeginCount = 0;
11679 
11680  if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
11681  {
11682  // Swap 1st with 2nd. Now 2nd is empty.
11683  m_2ndVectorMode = SECOND_VECTOR_EMPTY;
11684  m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
11685  while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
11686  suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
11687  {
11688  ++m_1stNullItemsBeginCount;
11689  --m_1stNullItemsMiddleCount;
11690  }
11691  m_2ndNullItemsCount = 0;
11692  m_1stVectorIndex ^= 1;
11693  }
11694  }
11695  }
11696 
11697  VMA_HEAVY_ASSERT(Validate());
11698 }
11699 
11700 
11701 ////////////////////////////////////////////////////////////////////////////////
11702 // class VmaBlockMetadata_Buddy
11703 
11704 VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
11705  VmaBlockMetadata(hAllocator),
11706  m_Root(VMA_NULL),
11707  m_AllocationCount(0),
11708  m_FreeCount(1),
11709  m_SumFreeSize(0)
11710 {
11711  memset(m_FreeList, 0, sizeof(m_FreeList));
11712 }
11713 
11714 VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
11715 {
11716  DeleteNode(m_Root);
11717 }
11718 
11719 void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
11720 {
11721  VmaBlockMetadata::Init(size);
11722 
11723  m_UsableSize = VmaPrevPow2(size);
11724  m_SumFreeSize = m_UsableSize;
11725 
11726  // Calculate m_LevelCount.
11727  m_LevelCount = 1;
11728  while(m_LevelCount < MAX_LEVELS &&
11729  LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
11730  {
11731  ++m_LevelCount;
11732  }
11733 
11734  Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
11735  rootNode->offset = 0;
11736  rootNode->type = Node::TYPE_FREE;
11737  rootNode->parent = VMA_NULL;
11738  rootNode->buddy = VMA_NULL;
11739 
11740  m_Root = rootNode;
11741  AddToFreeListFront(0, rootNode);
11742 }
11743 
11744 bool VmaBlockMetadata_Buddy::Validate() const
11745 {
11746  // Validate tree.
11747  ValidationContext ctx;
11748  if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
11749  {
11750  VMA_VALIDATE(false && "ValidateNode failed.");
11751  }
11752  VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
11753  VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
11754 
11755  // Validate free node lists.
11756  for(uint32_t level = 0; level < m_LevelCount; ++level)
11757  {
11758  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
11759  m_FreeList[level].front->free.prev == VMA_NULL);
11760 
11761  for(Node* node = m_FreeList[level].front;
11762  node != VMA_NULL;
11763  node = node->free.next)
11764  {
11765  VMA_VALIDATE(node->type == Node::TYPE_FREE);
11766 
11767  if(node->free.next == VMA_NULL)
11768  {
11769  VMA_VALIDATE(m_FreeList[level].back == node);
11770  }
11771  else
11772  {
11773  VMA_VALIDATE(node->free.next->free.prev == node);
11774  }
11775  }
11776  }
11777 
11778  // Validate that free lists at higher levels are empty.
11779  for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
11780  {
11781  VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
11782  }
11783 
11784  return true;
11785 }
11786 
11787 VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const
11788 {
11789  for(uint32_t level = 0; level < m_LevelCount; ++level)
11790  {
11791  if(m_FreeList[level].front != VMA_NULL)
11792  {
11793  return LevelToNodeSize(level);
11794  }
11795  }
11796  return 0;
11797 }
11798 
11799 void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
11800 {
11801  const VkDeviceSize unusableSize = GetUnusableSize();
11802 
11803  outInfo.blockCount = 1;
11804 
11805  outInfo.allocationCount = outInfo.unusedRangeCount = 0;
11806  outInfo.usedBytes = outInfo.unusedBytes = 0;
11807 
11808  outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0;
11809  outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX;
11810  outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused.
11811 
11812  CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0));
11813 
11814  if(unusableSize > 0)
11815  {
11816  ++outInfo.unusedRangeCount;
11817  outInfo.unusedBytes += unusableSize;
11818  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize);
11819  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize);
11820  }
11821 }
11822 
11823 void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const
11824 {
11825  const VkDeviceSize unusableSize = GetUnusableSize();
11826 
11827  inoutStats.size += GetSize();
11828  inoutStats.unusedSize += m_SumFreeSize + unusableSize;
11829  inoutStats.allocationCount += m_AllocationCount;
11830  inoutStats.unusedRangeCount += m_FreeCount;
11831  inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax());
11832 
11833  if(unusableSize > 0)
11834  {
11835  ++inoutStats.unusedRangeCount;
11836  // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations.
11837  }
11838 }
11839 
11840 #if VMA_STATS_STRING_ENABLED
11841 
11842 void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const
11843 {
11844  // TODO optimize
11845  VmaStatInfo stat;
11846  CalcAllocationStatInfo(stat);
11847 
11848  PrintDetailedMap_Begin(
11849  json,
11850  stat.unusedBytes,
11851  stat.allocationCount,
11852  stat.unusedRangeCount);
11853 
11854  PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
11855 
11856  const VkDeviceSize unusableSize = GetUnusableSize();
11857  if(unusableSize > 0)
11858  {
11859  PrintDetailedMap_UnusedRange(json,
11860  m_UsableSize, // offset
11861  unusableSize); // size
11862  }
11863 
11864  PrintDetailedMap_End(json);
11865 }
11866 
11867 #endif // #if VMA_STATS_STRING_ENABLED
11868 
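// [Editorial sketch - not part of the original file.] VmaBlockMetadata_Buddy
// is selected for a custom pool through the public flag
// VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT. A creation sketch with hypothetical
// names, assuming a suitable 'memTypeIndex' was chosen beforehand:
static VkResult ExampleCreateBuddyPool(VmaAllocator allocator,
    uint32_t memTypeIndex, VmaPool* outPool)
{
    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;
    poolInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
    // A power-of-2 blockSize avoids waste: Init() above rounds the usable
    // size down with VmaPrevPow2().
    poolInfo.blockSize = 64ull * 1024 * 1024;
    return vmaCreatePool(allocator, &poolInfo, outPool);
}
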
11869 bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
11870  uint32_t currentFrameIndex,
11871  uint32_t frameInUseCount,
11872  VkDeviceSize bufferImageGranularity,
11873  VkDeviceSize allocSize,
11874  VkDeviceSize allocAlignment,
11875  bool upperAddress,
11876  VmaSuballocationType allocType,
11877  bool canMakeOtherLost,
11878  uint32_t strategy,
11879  VmaAllocationRequest* pAllocationRequest)
11880 {
11881  VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
11882 
11883  // Simple way to respect bufferImageGranularity. May be optimized some day.
11884  // Whenever it might be an OPTIMAL image...
11885  if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
11886  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
11887  allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
11888  {
11889  allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity);
11890  allocSize = VMA_MAX(allocSize, bufferImageGranularity);
11891  }
11892 
11893  if(allocSize > m_UsableSize)
11894  {
11895  return false;
11896  }
11897 
11898  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11899  for(uint32_t level = targetLevel + 1; level--; )
11900  {
11901  for(Node* freeNode = m_FreeList[level].front;
11902  freeNode != VMA_NULL;
11903  freeNode = freeNode->free.next)
11904  {
11905  if(freeNode->offset % allocAlignment == 0)
11906  {
11907  pAllocationRequest->type = VmaAllocationRequestType::Normal;
11908  pAllocationRequest->offset = freeNode->offset;
11909  pAllocationRequest->sumFreeSize = LevelToNodeSize(level);
11910  pAllocationRequest->sumItemSize = 0;
11911  pAllocationRequest->itemsToMakeLostCount = 0;
11912  pAllocationRequest->customData = (void*)(uintptr_t)level;
11913  return true;
11914  }
11915  }
11916  }
11917 
11918  return false;
11919 }
11920 
11921 bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost(
11922  uint32_t currentFrameIndex,
11923  uint32_t frameInUseCount,
11924  VmaAllocationRequest* pAllocationRequest)
11925 {
11926  /*
11927  Lost allocations are not supported in buddy allocator at the moment.
11928  Support might be added in the future.
11929  */
11930  return pAllocationRequest->itemsToMakeLostCount == 0;
11931 }
11932 
11933 uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount)
11934 {
11935  /*
11936  Lost allocations are not supported in buddy allocator at the moment.
11937  Support might be added in the future.
11938  */
11939  return 0;
11940 }
11941 
11942 void VmaBlockMetadata_Buddy::Alloc(
11943  const VmaAllocationRequest& request,
11944  VmaSuballocationType type,
11945  VkDeviceSize allocSize,
11946  VmaAllocation hAllocation)
11947 {
11948  VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
11949 
11950  const uint32_t targetLevel = AllocSizeToLevel(allocSize);
11951  uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
11952 
11953  Node* currNode = m_FreeList[currLevel].front;
11954  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11955  while(currNode->offset != request.offset)
11956  {
11957  currNode = currNode->free.next;
11958  VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
11959  }
11960 
11961  // Go down, splitting free nodes.
11962  while(currLevel < targetLevel)
11963  {
11964  // currNode is already first free node at currLevel.
11965  // Remove it from list of free nodes at this currLevel.
11966  RemoveFromFreeList(currLevel, currNode);
11967 
11968  const uint32_t childrenLevel = currLevel + 1;
11969 
11970  // Create two free sub-nodes.
11971  Node* leftChild = vma_new(GetAllocationCallbacks(), Node)();
11972  Node* rightChild = vma_new(GetAllocationCallbacks(), Node)();
11973 
11974  leftChild->offset = currNode->offset;
11975  leftChild->type = Node::TYPE_FREE;
11976  leftChild->parent = currNode;
11977  leftChild->buddy = rightChild;
11978 
11979  rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
11980  rightChild->type = Node::TYPE_FREE;
11981  rightChild->parent = currNode;
11982  rightChild->buddy = leftChild;
11983 
11984  // Convert current currNode to split type.
11985  currNode->type = Node::TYPE_SPLIT;
11986  currNode->split.leftChild = leftChild;
11987 
11988  // Add child nodes to free list. Order is important!
11989  AddToFreeListFront(childrenLevel, rightChild);
11990  AddToFreeListFront(childrenLevel, leftChild);
11991 
11992  ++m_FreeCount;
11993  //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2.
11994  ++currLevel;
11995  currNode = m_FreeList[currLevel].front;
11996 
11997  /*
11998  We can be sure that currNode, as left child of node previously split,
11999  also fulfills the alignment requirement.
12000  */
12001  }
12002 
12003  // Remove from free list.
12004  VMA_ASSERT(currLevel == targetLevel &&
12005  currNode != VMA_NULL &&
12006  currNode->type == Node::TYPE_FREE);
12007  RemoveFromFreeList(currLevel, currNode);
12008 
12009  // Convert to allocation node.
12010  currNode->type = Node::TYPE_ALLOCATION;
12011  currNode->allocation.alloc = hAllocation;
12012 
12013  ++m_AllocationCount;
12014  --m_FreeCount;
12015  m_SumFreeSize -= allocSize;
12016 }
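// Informal worked example of the descent above (numbers are illustrative only):
// allocating 40 bytes from a fresh 256-byte block gives targetLevel = 2 (node
// size 64). The single level-0 free node is split into two 128-byte buddies,
// the left 128-byte buddy into two 64-byte buddies, and the left 64-byte node
// (offset 0) becomes TYPE_ALLOCATION. Each split nets +1 to m_FreeCount (one
// free parent consumed, two free children added); the final allocation takes
// one back, leaving two free nodes: the right buddies at levels 1 and 2.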
12017 
12018 void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
12019 {
12020  if(node->type == Node::TYPE_SPLIT)
12021  {
12022  DeleteNode(node->split.leftChild->buddy);
12023  DeleteNode(node->split.leftChild);
12024  }
12025 
12026  vma_delete(GetAllocationCallbacks(), node);
12027 }
12028 
12029 bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
12030 {
12031  VMA_VALIDATE(level < m_LevelCount);
12032  VMA_VALIDATE(curr->parent == parent);
12033  VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
12034  VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
12035  switch(curr->type)
12036  {
12037  case Node::TYPE_FREE:
12038  // curr->free.prev, next are validated separately.
12039  ctx.calculatedSumFreeSize += levelNodeSize;
12040  ++ctx.calculatedFreeCount;
12041  break;
12042  case Node::TYPE_ALLOCATION:
12043  ++ctx.calculatedAllocationCount;
12044  VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE); // Validate the handle before dereferencing it below.
12045  ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
12046  break;
12047  case Node::TYPE_SPLIT:
12048  {
12049  const uint32_t childrenLevel = level + 1;
12050  const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
12051  const Node* const leftChild = curr->split.leftChild;
12052  VMA_VALIDATE(leftChild != VMA_NULL);
12053  VMA_VALIDATE(leftChild->offset == curr->offset);
12054  if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
12055  {
12056  VMA_VALIDATE(false && "ValidateNode for left child failed.");
12057  }
12058  const Node* const rightChild = leftChild->buddy;
12059  VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
12060  if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
12061  {
12062  VMA_VALIDATE(false && "ValidateNode for right child failed.");
12063  }
12064  }
12065  break;
12066  default:
12067  return false;
12068  }
12069 
12070  return true;
12071 }
12072 
12073 uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
12074 {
12075  // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
12076  uint32_t level = 0;
12077  VkDeviceSize currLevelNodeSize = m_UsableSize;
12078  VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
12079  while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
12080  {
12081  ++level;
12082  currLevelNodeSize = nextLevelNodeSize;
12083  nextLevelNodeSize = currLevelNodeSize >> 1;
12084  }
12085  return level;
12086 }
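// Informal example (numbers are illustrative only): with m_UsableSize = 256 and
// m_LevelCount = 4, the node sizes per level are 256, 128, 64, 32.
// AllocSizeToLevel(40) advances while 40 fits in the next smaller size
// (40 <= 128, 40 <= 64) and stops at 40 > 32, returning level 2 - the deepest
// level whose node size (64) still holds the requested size.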
12087 
12088 void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
12089 {
12090  // Find node and level.
12091  Node* node = m_Root;
12092  VkDeviceSize nodeOffset = 0;
12093  uint32_t level = 0;
12094  VkDeviceSize levelNodeSize = LevelToNodeSize(0);
12095  while(node->type == Node::TYPE_SPLIT)
12096  {
12097  const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
12098  if(offset < nodeOffset + nextLevelSize)
12099  {
12100  node = node->split.leftChild;
12101  }
12102  else
12103  {
12104  node = node->split.leftChild->buddy;
12105  nodeOffset += nextLevelSize;
12106  }
12107  ++level;
12108  levelNodeSize = nextLevelSize;
12109  }
12110 
12111  VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
12112  VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
12113 
12114  ++m_FreeCount;
12115  --m_AllocationCount;
12116  m_SumFreeSize += alloc->GetSize();
12117 
12118  node->type = Node::TYPE_FREE;
12119 
12120  // Join free nodes if possible.
12121  while(level > 0 && node->buddy->type == Node::TYPE_FREE)
12122  {
12123  RemoveFromFreeList(level, node->buddy);
12124  Node* const parent = node->parent;
12125 
12126  vma_delete(GetAllocationCallbacks(), node->buddy);
12127  vma_delete(GetAllocationCallbacks(), node);
12128  parent->type = Node::TYPE_FREE;
12129 
12130  node = parent;
12131  --level;
12132  //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
12133  --m_FreeCount;
12134  }
12135 
12136  AddToFreeListFront(level, node);
12137 }
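// Informal example, continuing the Alloc() sketch above: freeing the 64-byte
// allocation at offset 0 finds its level-2 buddy (offset 64) free, so both
// children are deleted and their 128-byte parent becomes free; that parent's
// buddy (offset 128) is free too, so the loop repeats and the whole 256-byte
// root becomes a single level-0 free node - the block is back in its initial
// state, as if nothing had ever been allocated.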
12138 
12139 void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
12140 {
12141  switch(node->type)
12142  {
12143  case Node::TYPE_FREE:
12144  ++outInfo.unusedRangeCount;
12145  outInfo.unusedBytes += levelNodeSize;
12146  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
12147  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
12148  break;
12149  case Node::TYPE_ALLOCATION:
12150  {
12151  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12152  ++outInfo.allocationCount;
12153  outInfo.usedBytes += allocSize;
12154  outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
12155  outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
12156 
12157  const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
12158  if(unusedRangeSize > 0)
12159  {
12160  ++outInfo.unusedRangeCount;
12161  outInfo.unusedBytes += unusedRangeSize;
12162  outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
12163  outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
12164  }
12165  }
12166  break;
12167  case Node::TYPE_SPLIT:
12168  {
12169  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12170  const Node* const leftChild = node->split.leftChild;
12171  CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
12172  const Node* const rightChild = leftChild->buddy;
12173  CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
12174  }
12175  break;
12176  default:
12177  VMA_ASSERT(0);
12178  }
12179 }
12180 
12181 void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
12182 {
12183  VMA_ASSERT(node->type == Node::TYPE_FREE);
12184 
12185  // List is empty.
12186  Node* const frontNode = m_FreeList[level].front;
12187  if(frontNode == VMA_NULL)
12188  {
12189  VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
12190  node->free.prev = node->free.next = VMA_NULL;
12191  m_FreeList[level].front = m_FreeList[level].back = node;
12192  }
12193  else
12194  {
12195  VMA_ASSERT(frontNode->free.prev == VMA_NULL);
12196  node->free.prev = VMA_NULL;
12197  node->free.next = frontNode;
12198  frontNode->free.prev = node;
12199  m_FreeList[level].front = node;
12200  }
12201 }
12202 
12203 void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
12204 {
12205  VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
12206 
12207  // It is at the front.
12208  if(node->free.prev == VMA_NULL)
12209  {
12210  VMA_ASSERT(m_FreeList[level].front == node);
12211  m_FreeList[level].front = node->free.next;
12212  }
12213  else
12214  {
12215  Node* const prevFreeNode = node->free.prev;
12216  VMA_ASSERT(prevFreeNode->free.next == node);
12217  prevFreeNode->free.next = node->free.next;
12218  }
12219 
12220  // It is at the back.
12221  if(node->free.next == VMA_NULL)
12222  {
12223  VMA_ASSERT(m_FreeList[level].back == node);
12224  m_FreeList[level].back = node->free.prev;
12225  }
12226  else
12227  {
12228  Node* const nextFreeNode = node->free.next;
12229  VMA_ASSERT(nextFreeNode->free.prev == node);
12230  nextFreeNode->free.prev = node->free.prev;
12231  }
12232 }
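// Both free-list operations above are O(1): AddToFreeListFront pushes at the
// head of the per-level list, and RemoveFromFreeList unlinks an arbitrary node
// by patching its prev/next neighbors, falling back to the per-level front/back
// pointers when the node sits at either end of the list.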
12233 
12234 #if VMA_STATS_STRING_ENABLED
12235 void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
12236 {
12237  switch(node->type)
12238  {
12239  case Node::TYPE_FREE:
12240  PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
12241  break;
12242  case Node::TYPE_ALLOCATION:
12243  {
12244  PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc);
12245  const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
12246  if(allocSize < levelNodeSize)
12247  {
12248  PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize);
12249  }
12250  }
12251  break;
12252  case Node::TYPE_SPLIT:
12253  {
12254  const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
12255  const Node* const leftChild = node->split.leftChild;
12256  PrintDetailedMapNode(json, leftChild, childrenNodeSize);
12257  const Node* const rightChild = leftChild->buddy;
12258  PrintDetailedMapNode(json, rightChild, childrenNodeSize);
12259  }
12260  break;
12261  default:
12262  VMA_ASSERT(0);
12263  }
12264 }
12265 #endif // #if VMA_STATS_STRING_ENABLED
12266 
12267 
12268 ////////////////////////////////////////////////////////////////////////////////
12269 // class VmaDeviceMemoryBlock
12270 
12271 VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) :
12272  m_pMetadata(VMA_NULL),
12273  m_MemoryTypeIndex(UINT32_MAX),
12274  m_Id(0),
12275  m_hMemory(VK_NULL_HANDLE),
12276  m_MapCount(0),
12277  m_pMappedData(VMA_NULL)
12278 {
12279 }
12280 
12281 void VmaDeviceMemoryBlock::Init(
12282  VmaAllocator hAllocator,
12283  VmaPool hParentPool,
12284  uint32_t newMemoryTypeIndex,
12285  VkDeviceMemory newMemory,
12286  VkDeviceSize newSize,
12287  uint32_t id,
12288  uint32_t algorithm)
12289 {
12290  VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
12291 
12292  m_hParentPool = hParentPool;
12293  m_MemoryTypeIndex = newMemoryTypeIndex;
12294  m_Id = id;
12295  m_hMemory = newMemory;
12296 
12297  switch(algorithm)
12298  {
12299  case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
12300  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator);
12301  break;
12302  case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
12303  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator);
12304  break;
12305  default:
12306  VMA_ASSERT(0);
12307  // Fall-through.
12308  case 0:
12309  m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator);
12310  }
12311  m_pMetadata->Init(newSize);
12312 }
12313 
12314 void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
12315 {
12316  // This is the most important assert in the entire library.
12317  // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
12318  VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
12319 
12320  VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
12321  allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
12322  m_hMemory = VK_NULL_HANDLE;
12323 
12324  vma_delete(allocator, m_pMetadata);
12325  m_pMetadata = VMA_NULL;
12326 }
12327 
12328 bool VmaDeviceMemoryBlock::Validate() const
12329 {
12330  VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
12331  (m_pMetadata->GetSize() != 0));
12332 
12333  return m_pMetadata->Validate();
12334 }
12335 
12336 VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
12337 {
12338  void* pData = nullptr;
12339  VkResult res = Map(hAllocator, 1, &pData);
12340  if(res != VK_SUCCESS)
12341  {
12342  return res;
12343  }
12344 
12345  res = m_pMetadata->CheckCorruption(pData);
12346 
12347  Unmap(hAllocator, 1);
12348 
12349  return res;
12350 }
12351 
12352 VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
12353 {
12354  if(count == 0)
12355  {
12356  return VK_SUCCESS;
12357  }
12358 
12359  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12360  if(m_MapCount != 0)
12361  {
12362  m_MapCount += count;
12363  VMA_ASSERT(m_pMappedData != VMA_NULL);
12364  if(ppData != VMA_NULL)
12365  {
12366  *ppData = m_pMappedData;
12367  }
12368  return VK_SUCCESS;
12369  }
12370  else
12371  {
12372  VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
12373  hAllocator->m_hDevice,
12374  m_hMemory,
12375  0, // offset
12376  VK_WHOLE_SIZE,
12377  0, // flags
12378  &m_pMappedData);
12379  if(result == VK_SUCCESS)
12380  {
12381  if(ppData != VMA_NULL)
12382  {
12383  *ppData = m_pMappedData;
12384  }
12385  m_MapCount = count;
12386  }
12387  return result;
12388  }
12389 }
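// Map() and Unmap() reference-count the mapping per block: only the first Map()
// calls vkMapMemory and only the matching last Unmap() calls vkUnmapMemory, so
// several users (including persistently mapped allocations) can share a single
// mapping of the same VkDeviceMemory. A minimal usage sketch - hypothetical
// caller code, assuming hAllocator and pBlock are valid:
//
//   void* p1 = VMA_NULL;
//   void* p2 = VMA_NULL;
//   pBlock->Map(hAllocator, 1, &p1);   // maps memory, m_MapCount == 1
//   pBlock->Map(hAllocator, 1, &p2);   // reuses mapping, p2 == p1, m_MapCount == 2
//   pBlock->Unmap(hAllocator, 1);      // m_MapCount == 1, memory stays mapped
//   pBlock->Unmap(hAllocator, 1);      // m_MapCount == 0, vkUnmapMemory is called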
12390 
12391 void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
12392 {
12393  if(count == 0)
12394  {
12395  return;
12396  }
12397 
12398  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12399  if(m_MapCount >= count)
12400  {
12401  m_MapCount -= count;
12402  if(m_MapCount == 0)
12403  {
12404  m_pMappedData = VMA_NULL;
12405  (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
12406  }
12407  }
12408  else
12409  {
12410  VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
12411  }
12412 }
12413 
12414 VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12415 {
12416  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12417  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12418 
12419  void* pData;
12420  VkResult res = Map(hAllocator, 1, &pData);
12421  if(res != VK_SUCCESS)
12422  {
12423  return res;
12424  }
12425 
12426  VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN);
12427  VmaWriteMagicValue(pData, allocOffset + allocSize);
12428 
12429  Unmap(hAllocator, 1);
12430 
12431  return VK_SUCCESS;
12432 }
12433 
12434 VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
12435 {
12436  VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
12437  VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN);
12438 
12439  void* pData;
12440  VkResult res = Map(hAllocator, 1, &pData);
12441  if(res != VK_SUCCESS)
12442  {
12443  return res;
12444  }
12445 
12446  if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN))
12447  {
12448  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!");
12449  }
12450  else if(!VmaValidateMagicValue(pData, allocOffset + allocSize))
12451  {
12452  VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
12453  }
12454 
12455  Unmap(hAllocator, 1);
12456 
12457  return VK_SUCCESS;
12458 }
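// Memory layout assumed by the two functions above, with VMA_DEBUG_MARGIN bytes
// reserved on each side of every suballocation:
//
//   [..margin..][         allocation data         ][..margin..]
//   ^ magic at allocOffset - VMA_DEBUG_MARGIN      ^ magic at allocOffset + allocSize
//
// A mismatched magic value on either side means some code wrote past the end of
// the preceding allocation or before the beginning of the following one.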
12459 
12460 VkResult VmaDeviceMemoryBlock::BindBufferMemory(
12461  const VmaAllocator hAllocator,
12462  const VmaAllocation hAllocation,
12463  VkDeviceSize allocationLocalOffset,
12464  VkBuffer hBuffer,
12465  const void* pNext)
12466 {
12467  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12468  hAllocation->GetBlock() == this);
12469  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12470  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12471  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12472  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12473  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12474  return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
12475 }
12476 
12477 VkResult VmaDeviceMemoryBlock::BindImageMemory(
12478  const VmaAllocator hAllocator,
12479  const VmaAllocation hAllocation,
12480  VkDeviceSize allocationLocalOffset,
12481  VkImage hImage,
12482  const void* pNext)
12483 {
12484  VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
12485  hAllocation->GetBlock() == this);
12486  VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
12487  "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
12488  const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
12489  // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
12490  VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex);
12491  return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
12492 }
12493 
12494 static void InitStatInfo(VmaStatInfo& outInfo)
12495 {
12496  memset(&outInfo, 0, sizeof(outInfo));
12497  outInfo.allocationSizeMin = UINT64_MAX;
12498  outInfo.unusedRangeSizeMin = UINT64_MAX;
12499 }
12500 
12501 // Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo.
12502 static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo)
12503 {
12504  inoutInfo.blockCount += srcInfo.blockCount;
12505  inoutInfo.allocationCount += srcInfo.allocationCount;
12506  inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount;
12507  inoutInfo.usedBytes += srcInfo.usedBytes;
12508  inoutInfo.unusedBytes += srcInfo.unusedBytes;
12509  inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin);
12510  inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax);
12511  inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin);
12512  inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax);
12513 }
12514 
12515 static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo)
12516 {
12517  inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ?
12518  VmaRoundDiv<VkDeviceSize>(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0;
12519  inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ?
12520  VmaRoundDiv<VkDeviceSize>(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0;
12521 }
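// VmaRoundDiv rounds to the nearest integer (by adding half the divisor before
// dividing) instead of truncating. Informal example: usedBytes = 10 with
// allocationCount = 4 yields an average allocation size of 3 (10/4 = 2.5,
// rounded half up), where plain integer division would give 2.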
12522 
12523 VmaPool_T::VmaPool_T(
12524  VmaAllocator hAllocator,
12525  const VmaPoolCreateInfo& createInfo,
12526  VkDeviceSize preferredBlockSize) :
12527  m_BlockVector(
12528  hAllocator,
12529  this, // hParentPool
12530  createInfo.memoryTypeIndex,
12531  createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
12532  createInfo.minBlockCount,
12533  createInfo.maxBlockCount,
12534  (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
12535  createInfo.frameInUseCount,
12536  createInfo.blockSize != 0, // explicitBlockSize
12537  createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK), // algorithm
12538  m_Id(0),
12539  m_Name(VMA_NULL)
12540 {
12541 }
12542 
12543 VmaPool_T::~VmaPool_T()
12544 {
12545 }
12546 
12547 void VmaPool_T::SetName(const char* pName)
12548 {
12549  const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
12550  VmaFreeString(allocs, m_Name);
12551 
12552  if(pName != VMA_NULL)
12553  {
12554  m_Name = VmaCreateStringCopy(allocs, pName);
12555  }
12556  else
12557  {
12558  m_Name = VMA_NULL;
12559  }
12560 }
12561 
12562 #if VMA_STATS_STRING_ENABLED
12563 
12564 #endif // #if VMA_STATS_STRING_ENABLED
12565 
12566 VmaBlockVector::VmaBlockVector(
12567  VmaAllocator hAllocator,
12568  VmaPool hParentPool,
12569  uint32_t memoryTypeIndex,
12570  VkDeviceSize preferredBlockSize,
12571  size_t minBlockCount,
12572  size_t maxBlockCount,
12573  VkDeviceSize bufferImageGranularity,
12574  uint32_t frameInUseCount,
12575  bool explicitBlockSize,
12576  uint32_t algorithm) :
12577  m_hAllocator(hAllocator),
12578  m_hParentPool(hParentPool),
12579  m_MemoryTypeIndex(memoryTypeIndex),
12580  m_PreferredBlockSize(preferredBlockSize),
12581  m_MinBlockCount(minBlockCount),
12582  m_MaxBlockCount(maxBlockCount),
12583  m_BufferImageGranularity(bufferImageGranularity),
12584  m_FrameInUseCount(frameInUseCount),
12585  m_ExplicitBlockSize(explicitBlockSize),
12586  m_Algorithm(algorithm),
12587  m_HasEmptyBlock(false),
12588  m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
12589  m_NextBlockId(0)
12590 {
12591 }
12592 
12593 VmaBlockVector::~VmaBlockVector()
12594 {
12595  for(size_t i = m_Blocks.size(); i--; )
12596  {
12597  m_Blocks[i]->Destroy(m_hAllocator);
12598  vma_delete(m_hAllocator, m_Blocks[i]);
12599  }
12600 }
12601 
12602 VkResult VmaBlockVector::CreateMinBlocks()
12603 {
12604  for(size_t i = 0; i < m_MinBlockCount; ++i)
12605  {
12606  VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
12607  if(res != VK_SUCCESS)
12608  {
12609  return res;
12610  }
12611  }
12612  return VK_SUCCESS;
12613 }
12614 
12615 void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
12616 {
12617  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12618 
12619  const size_t blockCount = m_Blocks.size();
12620 
12621  pStats->size = 0;
12622  pStats->unusedSize = 0;
12623  pStats->allocationCount = 0;
12624  pStats->unusedRangeCount = 0;
12625  pStats->unusedRangeSizeMax = 0;
12626  pStats->blockCount = blockCount;
12627 
12628  for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
12629  {
12630  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
12631  VMA_ASSERT(pBlock);
12632  VMA_HEAVY_ASSERT(pBlock->Validate());
12633  pBlock->m_pMetadata->AddPoolStats(*pStats);
12634  }
12635 }
12636 
12637 bool VmaBlockVector::IsEmpty()
12638 {
12639  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
12640  return m_Blocks.empty();
12641 }
12642 
12643 bool VmaBlockVector::IsCorruptionDetectionEnabled() const
12644 {
12645  const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
12646  return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
12647  (VMA_DEBUG_MARGIN > 0) &&
12648  (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
12649  (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
12650 }
12651 
12652 static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
12653 
12654 VkResult VmaBlockVector::Allocate(
12655  uint32_t currentFrameIndex,
12656  VkDeviceSize size,
12657  VkDeviceSize alignment,
12658  const VmaAllocationCreateInfo& createInfo,
12659  VmaSuballocationType suballocType,
12660  size_t allocationCount,
12661  VmaAllocation* pAllocations)
12662 {
12663  size_t allocIndex;
12664  VkResult res = VK_SUCCESS;
12665 
12666  if(IsCorruptionDetectionEnabled())
12667  {
12668  size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12669  alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
12670  }
12671 
12672  {
12673  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
12674  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
12675  {
12676  res = AllocatePage(
12677  currentFrameIndex,
12678  size,
12679  alignment,
12680  createInfo,
12681  suballocType,
12682  pAllocations + allocIndex);
12683  if(res != VK_SUCCESS)
12684  {
12685  break;
12686  }
12687  }
12688  }
12689 
12690  if(res != VK_SUCCESS)
12691  {
12692  // Free all already created allocations.
12693  while(allocIndex--)
12694  {
12695  Free(pAllocations[allocIndex]);
12696  }
12697  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
12698  }
12699 
12700  return res;
12701 }
12702 
12703 VkResult VmaBlockVector::AllocatePage(
12704  uint32_t currentFrameIndex,
12705  VkDeviceSize size,
12706  VkDeviceSize alignment,
12707  const VmaAllocationCreateInfo& createInfo,
12708  VmaSuballocationType suballocType,
12709  VmaAllocation* pAllocation)
12710 {
12711  const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
12712  bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
12713  const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
12714  const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
12715 
12716  VkDeviceSize freeMemory;
12717  {
12718  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
12719  VmaBudget heapBudget = {};
12720  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
12721  freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
12722  }
12723 
12724  const bool canFallbackToDedicated = !IsCustomPool();
12725  const bool canCreateNewBlock =
12726  ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
12727  (m_Blocks.size() < m_MaxBlockCount) &&
12728  (freeMemory >= size || !canFallbackToDedicated);
12729  uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
12730 
12731  // If the linear algorithm is used, canMakeOtherLost is available only when used as a ring buffer,
12732  // which in turn is available only when maxBlockCount = 1.
12733  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
12734  {
12735  canMakeOtherLost = false;
12736  }
12737 
12738  // Upper address can only be used with linear allocator and within single memory block.
12739  if(isUpperAddress &&
12740  (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
12741  {
12742  return VK_ERROR_FEATURE_NOT_PRESENT;
12743  }
12744 
12745  // Validate strategy.
12746  switch(strategy)
12747  {
12748  case 0:
12749  strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
12750  break;
12751  case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
12752  case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
12753  case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
12754  break;
12755  default:
12756  return VK_ERROR_FEATURE_NOT_PRESENT;
12757  }
12758 
12759  // Early reject: requested allocation size is larger than the maximum block size for this block vector.
12760  if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
12761  {
12762  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12763  }
12764 
12765  /*
12766  Under certain conditions, this whole section can be skipped for optimization, so
12767  we move on directly to trying to allocate with canMakeOtherLost. That's the case
12768  e.g. for custom pools with linear algorithm.
12769  */
12770  if(!canMakeOtherLost || canCreateNewBlock)
12771  {
12772  // 1. Search existing allocations. Try to allocate without making other allocations lost.
12773  VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags;
12774  allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
12775 
12776  if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
12777  {
12778  // Use only last block.
12779  if(!m_Blocks.empty())
12780  {
12781  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
12782  VMA_ASSERT(pCurrBlock);
12783  VkResult res = AllocateFromBlock(
12784  pCurrBlock,
12785  currentFrameIndex,
12786  size,
12787  alignment,
12788  allocFlagsCopy,
12789  createInfo.pUserData,
12790  suballocType,
12791  strategy,
12792  pAllocation);
12793  if(res == VK_SUCCESS)
12794  {
12795  VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId());
12796  return VK_SUCCESS;
12797  }
12798  }
12799  }
12800  else
12801  {
12802  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12803  {
12804  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12805  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12806  {
12807  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12808  VMA_ASSERT(pCurrBlock);
12809  VkResult res = AllocateFromBlock(
12810  pCurrBlock,
12811  currentFrameIndex,
12812  size,
12813  alignment,
12814  allocFlagsCopy,
12815  createInfo.pUserData,
12816  suballocType,
12817  strategy,
12818  pAllocation);
12819  if(res == VK_SUCCESS)
12820  {
12821  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12822  return VK_SUCCESS;
12823  }
12824  }
12825  }
12826  else // WORST_FIT, FIRST_FIT
12827  {
12828  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12829  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12830  {
12831  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12832  VMA_ASSERT(pCurrBlock);
12833  VkResult res = AllocateFromBlock(
12834  pCurrBlock,
12835  currentFrameIndex,
12836  size,
12837  alignment,
12838  allocFlagsCopy,
12839  createInfo.pUserData,
12840  suballocType,
12841  strategy,
12842  pAllocation);
12843  if(res == VK_SUCCESS)
12844  {
12845  VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId());
12846  return VK_SUCCESS;
12847  }
12848  }
12849  }
12850  }
12851 
12852  // 2. Try to create new block.
12853  if(canCreateNewBlock)
12854  {
12855  // Calculate optimal size for new block.
12856  VkDeviceSize newBlockSize = m_PreferredBlockSize;
12857  uint32_t newBlockSizeShift = 0;
12858  const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
12859 
12860  if(!m_ExplicitBlockSize)
12861  {
12862  // Allocate 1/8, 1/4, 1/2 as first blocks.
12863  const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
12864  for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
12865  {
12866  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12867  if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
12868  {
12869  newBlockSize = smallerNewBlockSize;
12870  ++newBlockSizeShift;
12871  }
12872  else
12873  {
12874  break;
12875  }
12876  }
12877  }
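// Informal example of this heuristic (sizes are illustrative only): with
// m_PreferredBlockSize = 256 MiB, no existing blocks, and a first allocation of
// 10 MiB, the loop halves the candidate size three times (128, 64, 32 MiB; each
// exceeds the largest existing block and is still >= 2x the allocation), so the
// first block created is 32 MiB. As larger blocks accumulate, later blocks grow
// back toward the preferred size.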
12878 
12879  size_t newBlockIndex = 0;
12880  VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12881  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12882  // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
12883  if(!m_ExplicitBlockSize)
12884  {
12885  while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
12886  {
12887  const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
12888  if(smallerNewBlockSize >= size)
12889  {
12890  newBlockSize = smallerNewBlockSize;
12891  ++newBlockSizeShift;
12892  res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
12893  CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
12894  }
12895  else
12896  {
12897  break;
12898  }
12899  }
12900  }
12901 
12902  if(res == VK_SUCCESS)
12903  {
12904  VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
12905  VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
12906 
12907  res = AllocateFromBlock(
12908  pBlock,
12909  currentFrameIndex,
12910  size,
12911  alignment,
12912  allocFlagsCopy,
12913  createInfo.pUserData,
12914  suballocType,
12915  strategy,
12916  pAllocation);
12917  if(res == VK_SUCCESS)
12918  {
12919  VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
12920  return VK_SUCCESS;
12921  }
12922  else
12923  {
12924  // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
12925  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
12926  }
12927  }
12928  }
12929  }
12930 
12931  // 3. Try to allocate from existing blocks with making other allocations lost.
12932  if(canMakeOtherLost)
12933  {
12934  uint32_t tryIndex = 0;
12935  for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex)
12936  {
12937  VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL;
12938  VmaAllocationRequest bestRequest = {};
12939  VkDeviceSize bestRequestCost = VK_WHOLE_SIZE;
12940 
12941  // 1. Search existing allocations.
12942  if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
12943  {
12944  // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
12945  for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex )
12946  {
12947  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12948  VMA_ASSERT(pCurrBlock);
12949  VmaAllocationRequest currRequest = {};
12950  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12951  currentFrameIndex,
12952  m_FrameInUseCount,
12953  m_BufferImageGranularity,
12954  size,
12955  alignment,
12956  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12957  suballocType,
12958  canMakeOtherLost,
12959  strategy,
12960  &currRequest))
12961  {
12962  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12963  if(pBestRequestBlock == VMA_NULL ||
12964  currRequestCost < bestRequestCost)
12965  {
12966  pBestRequestBlock = pCurrBlock;
12967  bestRequest = currRequest;
12968  bestRequestCost = currRequestCost;
12969 
12970  if(bestRequestCost == 0)
12971  {
12972  break;
12973  }
12974  }
12975  }
12976  }
12977  }
12978  else // WORST_FIT, FIRST_FIT
12979  {
12980  // Backward order in m_Blocks - prefer blocks with largest amount of free space.
12981  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
12982  {
12983  VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
12984  VMA_ASSERT(pCurrBlock);
12985  VmaAllocationRequest currRequest = {};
12986  if(pCurrBlock->m_pMetadata->CreateAllocationRequest(
12987  currentFrameIndex,
12988  m_FrameInUseCount,
12989  m_BufferImageGranularity,
12990  size,
12991  alignment,
12992  (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0,
12993  suballocType,
12994  canMakeOtherLost,
12995  strategy,
12996  &currRequest))
12997  {
12998  const VkDeviceSize currRequestCost = currRequest.CalcCost();
12999  if(pBestRequestBlock == VMA_NULL ||
13000  currRequestCost < bestRequestCost ||
13001  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13002  {
13003  pBestRequestBlock = pCurrBlock;
13004  bestRequest = currRequest;
13005  bestRequestCost = currRequestCost;
13006 
13007  if(bestRequestCost == 0 ||
13008  strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT)
13009  {
13010  break;
13011  }
13012  }
13013  }
13014  }
13015  }
13016 
13017  if(pBestRequestBlock != VMA_NULL)
13018  {
13019  if(mapped)
13020  {
13021  VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL);
13022  if(res != VK_SUCCESS)
13023  {
13024  return res;
13025  }
13026  }
13027 
13028  if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost(
13029  currentFrameIndex,
13030  m_FrameInUseCount,
13031  &bestRequest))
13032  {
13033  // Allocate from this pBlock.
13034  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13035  pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
13036  UpdateHasEmptyBlock();
13037  (*pAllocation)->InitBlockAllocation(
13038  pBestRequestBlock,
13039  bestRequest.offset,
13040  alignment,
13041  size,
13042  m_MemoryTypeIndex,
13043  suballocType,
13044  mapped,
13045  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13046  VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
13047  VMA_DEBUG_LOG(" Returned from existing block");
13048  (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
13049  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13050  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13051  {
13052  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13053  }
13054  if(IsCorruptionDetectionEnabled())
13055  {
13056  VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
13057  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13058  }
13059  return VK_SUCCESS;
13060  }
13061  // else: Some allocations must have been touched while we are here. Next try.
13062  }
13063  else
13064  {
13065  // Could not find place in any of the blocks - break outer loop.
13066  break;
13067  }
13068  }
13069  /* Maximum number of tries exceeded - a very unlikely event that can happen when
13070  many other threads are simultaneously touching allocations, making it impossible
13071  to make them lost at the same time as we try to allocate. */
13072  if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
13073  {
13074  return VK_ERROR_TOO_MANY_OBJECTS;
13075  }
13076  }
13077 
13078  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13079 }
13080 
13081 void VmaBlockVector::Free(
13082  const VmaAllocation hAllocation)
13083 {
13084  VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
13085 
13086  bool budgetExceeded = false;
13087  {
13088  const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
13089  VmaBudget heapBudget = {};
13090  m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
13091  budgetExceeded = heapBudget.usage >= heapBudget.budget;
13092  }
13093 
13094  // Scope for lock.
13095  {
13096  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13097 
13098  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
13099 
13100  if(IsCorruptionDetectionEnabled())
13101  {
13102  VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
13103  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
13104  }
13105 
13106  if(hAllocation->IsPersistentMap())
13107  {
13108  pBlock->Unmap(m_hAllocator, 1);
13109  }
13110 
13111  pBlock->m_pMetadata->Free(hAllocation);
13112  VMA_HEAVY_ASSERT(pBlock->Validate());
13113 
13114  VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
13115 
13116  const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
13117  // pBlock became empty after this deallocation.
13118  if(pBlock->m_pMetadata->IsEmpty())
13119  {
13120  // There is already another empty block (or the budget is exceeded) - we don't want two, so delete this one.
13121  if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
13122  {
13123  pBlockToDelete = pBlock;
13124  Remove(pBlock);
13125  }
13126  // else: We now have an empty block - leave it.
13127  }
13128  // pBlock didn't become empty, but we have another empty block - find and free that one.
13129  // (This is optional, just a heuristic.)
13130  else if(m_HasEmptyBlock && canDeleteBlock)
13131  {
13132  VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
13133  if(pLastBlock->m_pMetadata->IsEmpty())
13134  {
13135  pBlockToDelete = pLastBlock;
13136  m_Blocks.pop_back();
13137  }
13138  }
13139 
13140  UpdateHasEmptyBlock();
13141  IncrementallySortBlocks();
13142  }
13143 
13144  // Destruction of a free block. Deferred until this point, outside of the mutex
13145  // lock, for performance reasons.
13146  if(pBlockToDelete != VMA_NULL)
13147  {
13148  VMA_DEBUG_LOG(" Deleted empty block");
13149  pBlockToDelete->Destroy(m_hAllocator);
13150  vma_delete(m_hAllocator, pBlockToDelete);
13151  }
13152 }
13153 
13154 VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
13155 {
13156  VkDeviceSize result = 0;
13157  for(size_t i = m_Blocks.size(); i--; )
13158  {
13159  result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
13160  if(result >= m_PreferredBlockSize)
13161  {
13162  break;
13163  }
13164  }
13165  return result;
13166 }
13167 
13168 void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
13169 {
13170  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13171  {
13172  if(m_Blocks[blockIndex] == pBlock)
13173  {
13174  VmaVectorRemove(m_Blocks, blockIndex);
13175  return;
13176  }
13177  }
13178  VMA_ASSERT(0);
13179 }
13180 
13181 void VmaBlockVector::IncrementallySortBlocks()
13182 {
13183  if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
13184  {
13185  // Bubble sort only until first swap.
13186  for(size_t i = 1; i < m_Blocks.size(); ++i)
13187  {
13188  if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
13189  {
13190  VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
13191  return;
13192  }
13193  }
13194  }
13195 }
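// Doing at most one swap per call keeps this O(n) and amortizes the sorting cost
// across many Free() calls: blocks drift toward ascending order of free space,
// which keeps the forward "prefer blocks with smallest amount of free space"
// search in AllocatePage() effective without ever paying for a full sort.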
13196 
13197 VkResult VmaBlockVector::AllocateFromBlock(
13198  VmaDeviceMemoryBlock* pBlock,
13199  uint32_t currentFrameIndex,
13200  VkDeviceSize size,
13201  VkDeviceSize alignment,
13202  VmaAllocationCreateFlags allocFlags,
13203  void* pUserData,
13204  VmaSuballocationType suballocType,
13205  uint32_t strategy,
13206  VmaAllocation* pAllocation)
13207 {
13208  VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
13209  const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
13210  const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
13211  const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
13212 
13213  VmaAllocationRequest currRequest = {};
13214  if(pBlock->m_pMetadata->CreateAllocationRequest(
13215  currentFrameIndex,
13216  m_FrameInUseCount,
13217  m_BufferImageGranularity,
13218  size,
13219  alignment,
13220  isUpperAddress,
13221  suballocType,
13222  false, // canMakeOtherLost
13223  strategy,
13224  &currRequest))
13225  {
13226  // Allocate from pCurrBlock.
13227  VMA_ASSERT(currRequest.itemsToMakeLostCount == 0);
13228 
13229  if(mapped)
13230  {
13231  VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
13232  if(res != VK_SUCCESS)
13233  {
13234  return res;
13235  }
13236  }
13237 
13238  *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
13239  pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation);
13240  UpdateHasEmptyBlock();
13241  (*pAllocation)->InitBlockAllocation(
13242  pBlock,
13243  currRequest.offset,
13244  alignment,
13245  size,
13246  m_MemoryTypeIndex,
13247  suballocType,
13248  mapped,
13249  (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
13250  VMA_HEAVY_ASSERT(pBlock->Validate());
13251  (*pAllocation)->SetUserData(m_hAllocator, pUserData);
13252  m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
13253  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
13254  {
13255  m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
13256  }
13257  if(IsCorruptionDetectionEnabled())
13258  {
13259  VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size);
13260  VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
13261  }
13262  return VK_SUCCESS;
13263  }
13264  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
13265 }
13266 
13267 VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
13268 {
13269  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
13270  allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
13271  allocInfo.allocationSize = blockSize;
13272 
13273 #if VMA_BUFFER_DEVICE_ADDRESS
13274  // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
13275  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
13276  if(m_hAllocator->m_UseKhrBufferDeviceAddress)
13277  {
13278  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
13279  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
13280  }
13281 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
13282 
13283  VkDeviceMemory mem = VK_NULL_HANDLE;
13284  VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
13285  if(res < 0)
13286  {
13287  return res;
13288  }
13289 
13290  // New VkDeviceMemory successfully created.
13291 
13292  // Create new Allocation for it.
13293  VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
13294  pBlock->Init(
13295  m_hAllocator,
13296  m_hParentPool,
13297  m_MemoryTypeIndex,
13298  mem,
13299  allocInfo.allocationSize,
13300  m_NextBlockId++,
13301  m_Algorithm);
13302 
13303  m_Blocks.push_back(pBlock);
13304  if(pNewBlockIndex != VMA_NULL)
13305  {
13306  *pNewBlockIndex = m_Blocks.size() - 1;
13307  }
13308 
13309  return VK_SUCCESS;
13310 }
13311 
13312 void VmaBlockVector::ApplyDefragmentationMovesCpu(
13313  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13314  const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
13315 {
13316  const size_t blockCount = m_Blocks.size();
13317  const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
13318 
13319  enum BLOCK_FLAG
13320  {
13321  BLOCK_FLAG_USED = 0x00000001,
13322  BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
13323  };
13324 
13325  struct BlockInfo
13326  {
13327  uint32_t flags;
13328  void* pMappedData;
13329  };
13330  VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
13331  blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
13332  memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
13333 
13334  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13335  const size_t moveCount = moves.size();
13336  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13337  {
13338  const VmaDefragmentationMove& move = moves[moveIndex];
13339  blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
13340  blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
13341  }
13342 
13343  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13344 
13345  // Go over all blocks. Get mapped pointer or map if necessary.
13346  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13347  {
13348  BlockInfo& currBlockInfo = blockInfo[blockIndex];
13349  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13350  if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
13351  {
13352  currBlockInfo.pMappedData = pBlock->GetMappedData();
13353  // It is not originally mapped - map it.
13354  if(currBlockInfo.pMappedData == VMA_NULL)
13355  {
13356  pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
13357  if(pDefragCtx->res == VK_SUCCESS)
13358  {
13359  currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
13360  }
13361  }
13362  }
13363  }
13364 
13365  // Go over all moves. Do actual data transfer.
13366  if(pDefragCtx->res == VK_SUCCESS)
13367  {
13368  const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
13369  VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
13370 
13371  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13372  {
13373  const VmaDefragmentationMove& move = moves[moveIndex];
13374 
13375  const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
13376  const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
13377 
13378  VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
13379 
13380  // Invalidate source.
13381  if(isNonCoherent)
13382  {
13383  VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
13384  memRange.memory = pSrcBlock->GetDeviceMemory();
13385  memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
13386  memRange.size = VMA_MIN(
13387  VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
13388  pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
13389  (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13390  }
13391 
13392  // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
13393  memmove(
13394  reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
13395  reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
13396  static_cast<size_t>(move.size));
13397 
13398  if(IsCorruptionDetectionEnabled())
13399  {
13400  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
13401  VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
13402  }
13403 
13404  // Flush destination.
13405  if(isNonCoherent)
13406  {
13407  VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
13408  memRange.memory = pDstBlock->GetDeviceMemory();
13409  memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
13410  memRange.size = VMA_MIN(
13411  VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
13412  pDstBlock->m_pMetadata->GetSize() - memRange.offset);
13413  (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
13414  }
13415  }
13416  }
13417 
13418  // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
13419  // This is done regardless of whether pCtx->res == VK_SUCCESS.
13420  for(size_t blockIndex = blockCount; blockIndex--; )
13421  {
13422  const BlockInfo& currBlockInfo = blockInfo[blockIndex];
13423  if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
13424  {
13425  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13426  pBlock->Unmap(m_hAllocator, 1);
13427  }
13428  }
13429 }
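// Note on the cache maintenance above: for non-coherent memory types, the source
// range is invalidated before the CPU reads it and the destination range is
// flushed after the CPU writes it. Both ranges are aligned to nonCoherentAtomSize
// and clamped to the block size, as the Vulkan specification requires for
// vkInvalidateMappedMemoryRanges / vkFlushMappedMemoryRanges.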
13430 
13431 void VmaBlockVector::ApplyDefragmentationMovesGpu(
13432  class VmaBlockVectorDefragmentationContext* pDefragCtx,
13433  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13434  VkCommandBuffer commandBuffer)
13435 {
13436  const size_t blockCount = m_Blocks.size();
13437 
13438  pDefragCtx->blockContexts.resize(blockCount);
13439  memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
13440 
13441  // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
13442  const size_t moveCount = moves.size();
13443  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13444  {
13445  const VmaDefragmentationMove& move = moves[moveIndex];
13446 
13447  //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
13448  {
13449  // Old-school moves still require us to map the whole block.
13450  pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13451  pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
13452  }
13453  }
13454 
13455  VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
13456 
13457  // Go over all blocks. Create and bind buffer for whole block if necessary.
13458  {
13459  VkBufferCreateInfo bufCreateInfo;
13460  VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
13461 
13462  for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
13463  {
13464  VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
13465  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13466  if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
13467  {
13468  bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
13469  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
13470  m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
13471  if(pDefragCtx->res == VK_SUCCESS)
13472  {
13473  pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
13474  m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
13475  }
13476  }
13477  }
13478  }
13479 
13480  // Go over all moves. Post data transfer commands to command buffer.
13481  if(pDefragCtx->res == VK_SUCCESS)
13482  {
13483  for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
13484  {
13485  const VmaDefragmentationMove& move = moves[moveIndex];
13486 
13487  const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
13488  const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
13489 
13490  VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
13491 
13492  VkBufferCopy region = {
13493  move.srcOffset,
13494  move.dstOffset,
13495  move.size };
13496  (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
13497  commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
13498  }
13499  }
13500 
13501  // Save buffers to defrag context for later destruction.
13502  if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
13503  {
13504  pDefragCtx->res = VK_NOT_READY;
13505  }
13506 }
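// Returning VK_NOT_READY here signals that the copy commands were only recorded,
// not executed: the caller is expected to submit commandBuffer and wait for it to
// complete before DefragmentationEnd() destroys the temporary buffers created above.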
13507 
13508 void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
13509 {
13510  for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
13511  {
13512  VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
13513  if(pBlock->m_pMetadata->IsEmpty())
13514  {
13515  if(m_Blocks.size() > m_MinBlockCount)
13516  {
13517  if(pDefragmentationStats != VMA_NULL)
13518  {
13519  ++pDefragmentationStats->deviceMemoryBlocksFreed;
13520  pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
13521  }
13522 
13523  VmaVectorRemove(m_Blocks, blockIndex);
13524  pBlock->Destroy(m_hAllocator);
13525  vma_delete(m_hAllocator, pBlock);
13526  }
13527  else
13528  {
13529  break;
13530  }
13531  }
13532  }
13533  UpdateHasEmptyBlock();
13534 }
13535 
13536 void VmaBlockVector::UpdateHasEmptyBlock()
13537 {
13538  m_HasEmptyBlock = false;
13539  for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
13540  {
13541  VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
13542  if(pBlock->m_pMetadata->IsEmpty())
13543  {
13544  m_HasEmptyBlock = true;
13545  break;
13546  }
13547  }
13548 }
13549 
13550 #if VMA_STATS_STRING_ENABLED
13551 
13552 void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
13553 {
13554  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13555 
13556  json.BeginObject();
13557 
13558  if(IsCustomPool())
13559  {
13560  const char* poolName = m_hParentPool->GetName();
13561  if(poolName != VMA_NULL && poolName[0] != '\0')
13562  {
13563  json.WriteString("Name");
13564  json.WriteString(poolName);
13565  }
13566 
13567  json.WriteString("MemoryTypeIndex");
13568  json.WriteNumber(m_MemoryTypeIndex);
13569 
13570  json.WriteString("BlockSize");
13571  json.WriteNumber(m_PreferredBlockSize);
13572 
13573  json.WriteString("BlockCount");
13574  json.BeginObject(true);
13575  if(m_MinBlockCount > 0)
13576  {
13577  json.WriteString("Min");
13578  json.WriteNumber((uint64_t)m_MinBlockCount);
13579  }
13580  if(m_MaxBlockCount < SIZE_MAX)
13581  {
13582  json.WriteString("Max");
13583  json.WriteNumber((uint64_t)m_MaxBlockCount);
13584  }
13585  json.WriteString("Cur");
13586  json.WriteNumber((uint64_t)m_Blocks.size());
13587  json.EndObject();
13588 
13589  if(m_FrameInUseCount > 0)
13590  {
13591  json.WriteString("FrameInUseCount");
13592  json.WriteNumber(m_FrameInUseCount);
13593  }
13594 
13595  if(m_Algorithm != 0)
13596  {
13597  json.WriteString("Algorithm");
13598  json.WriteString(VmaAlgorithmToStr(m_Algorithm));
13599  }
13600  }
13601  else
13602  {
13603  json.WriteString("PreferredBlockSize");
13604  json.WriteNumber(m_PreferredBlockSize);
13605  }
13606 
13607  json.WriteString("Blocks");
13608  json.BeginObject();
13609  for(size_t i = 0; i < m_Blocks.size(); ++i)
13610  {
13611  json.BeginString();
13612  json.ContinueString(m_Blocks[i]->GetId());
13613  json.EndString();
13614 
13615  m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
13616  }
13617  json.EndObject();
13618 
13619  json.EndObject();
13620 }
13621 
13622 #endif // #if VMA_STATS_STRING_ENABLED
13623 
13624 void VmaBlockVector::Defragment(
13625  class VmaBlockVectorDefragmentationContext* pCtx,
13626  VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
13627  VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
13628  VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove,
13629  VkCommandBuffer commandBuffer)
13630 {
13631  pCtx->res = VK_SUCCESS;
13632 
13633  const VkMemoryPropertyFlags memPropFlags =
13634  m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags;
13635  const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
13636 
13637  const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 &&
13638  isHostVisible;
13639  const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 &&
13640  !IsCorruptionDetectionEnabled() &&
13641  ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0;
13642 
13643  // There are options to defragment this memory type.
13644  if(canDefragmentOnCpu || canDefragmentOnGpu)
13645  {
13646  bool defragmentOnGpu;
13647  // There is only one option to defragment this memory type.
13648  if(canDefragmentOnGpu != canDefragmentOnCpu)
13649  {
13650  defragmentOnGpu = canDefragmentOnGpu;
13651  }
13652  // Both options are available: Heuristics to choose the best one.
13653  else
13654  {
13655  defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 ||
13656  m_hAllocator->IsIntegratedGpu();
13657  }
13658 
13659  bool overlappingMoveSupported = !defragmentOnGpu;
13660 
13661  if(m_hAllocator->m_UseMutex)
13662  {
13663  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13664  {
13665  if(!m_Mutex.TryLockWrite())
13666  {
13667  pCtx->res = VK_ERROR_INITIALIZATION_FAILED;
13668  return;
13669  }
13670  }
13671  else
13672  {
13673  m_Mutex.LockWrite();
13674  pCtx->mutexLocked = true;
13675  }
13676  }
13677 
13678  pCtx->Begin(overlappingMoveSupported, flags);
13679 
13680  // Defragment.
13681 
13682  const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove;
13683  const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove;
13684  pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags);
13685 
13686  // Accumulate statistics.
13687  if(pStats != VMA_NULL)
13688  {
13689  const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved();
13690  const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved();
13691  pStats->bytesMoved += bytesMoved;
13692  pStats->allocationsMoved += allocationsMoved;
13693  VMA_ASSERT(bytesMoved <= maxBytesToMove);
13694  VMA_ASSERT(allocationsMoved <= maxAllocationsToMove);
13695  if(defragmentOnGpu)
13696  {
13697  maxGpuBytesToMove -= bytesMoved;
13698  maxGpuAllocationsToMove -= allocationsMoved;
13699  }
13700  else
13701  {
13702  maxCpuBytesToMove -= bytesMoved;
13703  maxCpuAllocationsToMove -= allocationsMoved;
13704  }
13705  }
13706 
13707  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
13708  {
13709  if(m_hAllocator->m_UseMutex)
13710  m_Mutex.UnlockWrite();
13711 
13712  if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty())
13713  pCtx->res = VK_NOT_READY;
13714 
13715  return;
13716  }
13717 
13718  if(pCtx->res >= VK_SUCCESS)
13719  {
13720  if(defragmentOnGpu)
13721  {
13722  ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer);
13723  }
13724  else
13725  {
13726  ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves);
13727  }
13728  }
13729  }
13730 }
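// A minimal restatement of the CPU-vs-GPU choice above, as a hedged sketch
// (the helper name is illustrative, not part of the library):
/*
static bool ChooseGpuDefragmentation(VkMemoryPropertyFlags memPropFlags, bool isIntegratedGpu)
{
    // Prefer the GPU path for DEVICE_LOCAL memory, which the CPU may not be able
    // to map; on integrated GPUs, where memory is shared, the GPU path is also
    // preferred when both options are available.
    return (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 || isIntegratedGpu;
}
*/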
13731 
13732 void VmaBlockVector::DefragmentationEnd(
13733  class VmaBlockVectorDefragmentationContext* pCtx,
13734  uint32_t flags,
13735  VmaDefragmentationStats* pStats)
13736 {
13737  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex)
13738  {
13739  VMA_ASSERT(pCtx->mutexLocked == false);
13740 
13741  // Incremental defragmentation doesn't hold the lock, so when we enter here no
13742  // lock is protecting us. Since we mutate state below, we must acquire the lock now.
13743  m_Mutex.LockWrite();
13744  pCtx->mutexLocked = true;
13745  }
13746 
13747  // If the mutex isn't locked we didn't do any work and there is nothing to delete.
13748  if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex)
13749  {
13750  // Destroy buffers.
13751  for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;)
13752  {
13753  VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex];
13754  if(blockCtx.hBuffer)
13755  {
13756  (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks());
13757  }
13758  }
13759 
13760  if(pCtx->res >= VK_SUCCESS)
13761  {
13762  FreeEmptyBlocks(pStats);
13763  }
13764  }
13765 
13766  if(pCtx->mutexLocked)
13767  {
13768  VMA_ASSERT(m_hAllocator->m_UseMutex);
13769  m_Mutex.UnlockWrite();
13770  }
13771 }
13772 
13773 uint32_t VmaBlockVector::ProcessDefragmentations(
13774  class VmaBlockVectorDefragmentationContext *pCtx,
13775  VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves)
13776 {
13777  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13778 
13779  const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves);
13780 
13781  for(uint32_t i = 0; i < moveCount; ++ i)
13782  {
13783  VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i];
13784 
13785  pMove->allocation = move.hAllocation;
13786  pMove->memory = move.pDstBlock->GetDeviceMemory();
13787  pMove->offset = move.dstOffset;
13788 
13789  ++ pMove;
13790  }
13791 
13792  pCtx->defragmentationMovesProcessed += moveCount;
13793 
13794  return moveCount;
13795 }
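// A sketch of how user code can consume the entries filled in by
// ProcessDefragmentations() during an incremental pass. `pass` is a
// VmaDefragmentationPassInfo returned from vmaBeginDefragmentationPass();
// RecreateResourceAt() is a hypothetical user-side helper, not library API:
/*
for(uint32_t i = 0; i < pass.moveCount; ++i)
{
    const VmaDefragmentationPassMoveInfo& m = pass.pMoves[i];
    // m.allocation - the allocation being moved,
    // m.memory     - its new VkDeviceMemory,
    // m.offset     - its new offset within that memory.
    RecreateResourceAt(m.allocation, m.memory, m.offset);
}
*/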
13796 
13797 void VmaBlockVector::CommitDefragmentations(
13798  class VmaBlockVectorDefragmentationContext *pCtx,
13799  VmaDefragmentationStats* pStats)
13800 {
13801  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13802 
13803  for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i)
13804  {
13805  const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i];
13806 
13807  move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset);
13808  move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset);
13809  }
13810 
13811  pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed;
13812  FreeEmptyBlocks(pStats);
13813 }
13814 
13815 size_t VmaBlockVector::CalcAllocationCount() const
13816 {
13817  size_t result = 0;
13818  for(size_t i = 0; i < m_Blocks.size(); ++i)
13819  {
13820  result += m_Blocks[i]->m_pMetadata->GetAllocationCount();
13821  }
13822  return result;
13823 }
13824 
13825 bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const
13826 {
13827  if(m_BufferImageGranularity == 1)
13828  {
13829  return false;
13830  }
13831  VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE;
13832  for(size_t i = 0, count = m_Blocks.size(); i < count; ++i)
13833  {
13834  VmaDeviceMemoryBlock* const pBlock = m_Blocks[i];
13835  VMA_ASSERT(m_Algorithm == 0);
13836  VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata;
13837  if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType))
13838  {
13839  return true;
13840  }
13841  }
13842  return false;
13843 }
13844 
13845 void VmaBlockVector::MakePoolAllocationsLost(
13846  uint32_t currentFrameIndex,
13847  size_t* pLostAllocationCount)
13848 {
13849  VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
13850  size_t lostAllocationCount = 0;
13851  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13852  {
13853  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13854  VMA_ASSERT(pBlock);
13855  lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount);
13856  }
13857  if(pLostAllocationCount != VMA_NULL)
13858  {
13859  *pLostAllocationCount = lostAllocationCount;
13860  }
13861 }
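// The public entry point for the operation above, as a minimal sketch, assuming
// a valid `allocator` and a custom `pool` whose allocations can become lost:
/*
size_t lostCount = 0;
vmaMakePoolAllocationsLost(allocator, pool, &lostCount);
// lostCount now holds the number of allocations marked as lost in this pool.
*/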
13862 
13863 VkResult VmaBlockVector::CheckCorruption()
13864 {
13865  if(!IsCorruptionDetectionEnabled())
13866  {
13867  return VK_ERROR_FEATURE_NOT_PRESENT;
13868  }
13869 
13870  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13871  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13872  {
13873  VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13874  VMA_ASSERT(pBlock);
13875  VkResult res = pBlock->CheckCorruption(m_hAllocator);
13876  if(res != VK_SUCCESS)
13877  {
13878  return res;
13879  }
13880  }
13881  return VK_SUCCESS;
13882 }
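// A minimal sketch of the corresponding public call, assuming a valid
// `allocator` and corruption detection compiled in (nonzero VMA_DEBUG_MARGIN
// together with VMA_DEBUG_DETECT_CORRUPTION):
/*
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // check all memory types
// VK_SUCCESS                     - no corruption found,
// VK_ERROR_FEATURE_NOT_PRESENT   - detection not enabled for these memory types,
// VK_ERROR_VALIDATION_FAILED_EXT - corruption detected.
*/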
13883 
13884 void VmaBlockVector::AddStats(VmaStats* pStats)
13885 {
13886  const uint32_t memTypeIndex = m_MemoryTypeIndex;
13887  const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
13888 
13889  VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
13890 
13891  for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
13892  {
13893  const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
13894  VMA_ASSERT(pBlock);
13895  VMA_HEAVY_ASSERT(pBlock->Validate());
13896  VmaStatInfo allocationStatInfo;
13897  pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
13898  VmaAddStatInfo(pStats->total, allocationStatInfo);
13899  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
13900  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
13901  }
13902 }
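// These per-block-vector statistics feed the public vmaCalculateStats();
// a minimal usage sketch, assuming a valid `allocator`:
/*
VmaStats stats;
vmaCalculateStats(allocator, &stats);
// stats.total aggregates everything; per-type and per-heap numbers are in
// stats.memoryType[] and stats.memoryHeap[].
VkDeviceSize used = stats.total.usedBytes;
*/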
13903 
13904 ////////////////////////////////////////////////////////////////////////////////
13905 // VmaDefragmentationAlgorithm_Generic members definition
13906 
13907 VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
13908  VmaAllocator hAllocator,
13909  VmaBlockVector* pBlockVector,
13910  uint32_t currentFrameIndex,
13911  bool overlappingMoveSupported) :
13912  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
13913  m_AllocationCount(0),
13914  m_AllAllocations(false),
13915  m_BytesMoved(0),
13916  m_AllocationsMoved(0),
13917  m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
13918 {
13919  // Create block info for each block.
13920  const size_t blockCount = m_pBlockVector->m_Blocks.size();
13921  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
13922  {
13923  BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
13924  pBlockInfo->m_OriginalBlockIndex = blockIndex;
13925  pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
13926  m_Blocks.push_back(pBlockInfo);
13927  }
13928 
13929  // Sort them by m_pBlock pointer value.
13930  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
13931 }
13932 
13933 VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
13934 {
13935  for(size_t i = m_Blocks.size(); i--; )
13936  {
13937  vma_delete(m_hAllocator, m_Blocks[i]);
13938  }
13939 }
13940 
13941 void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
13942 {
13943  // Now as we are inside VmaBlockVector::m_Mutex, we can make final check if this allocation was not lost.
13944  if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
13945  {
13946  VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
13947  BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
13948  if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
13949  {
13950  AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
13951  (*it)->m_Allocations.push_back(allocInfo);
13952  }
13953  else
13954  {
13955  VMA_ASSERT(0);
13956  }
13957 
13958  ++m_AllocationCount;
13959  }
13960 }
13961 
13962 VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
13963  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
13964  VkDeviceSize maxBytesToMove,
13965  uint32_t maxAllocationsToMove,
13966  bool freeOldAllocations)
13967 {
13968  if(m_Blocks.empty())
13969  {
13970  return VK_SUCCESS;
13971  }
13972 
13973  // This is a choice based on research.
13974  // Option 1:
13975  uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
13976  // Option 2:
13977  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT;
13978  // Option 3:
13979  //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT;
13980 
13981  size_t srcBlockMinIndex = 0;
13982  // When FAST_ALGORITHM, move allocations only from the last of the blocks that contain non-movable allocations.
13983  /*
13984  if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT)
13985  {
13986  const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount();
13987  if(blocksWithNonMovableCount > 0)
13988  {
13989  srcBlockMinIndex = blocksWithNonMovableCount - 1;
13990  }
13991  }
13992  */
13993 
13994  size_t srcBlockIndex = m_Blocks.size() - 1;
13995  size_t srcAllocIndex = SIZE_MAX;
13996  for(;;)
13997  {
13998  // 1. Find next allocation to move.
13999  // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source".
14000  // 1.2. Then start from last to first m_Allocations.
14001  while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size())
14002  {
14003  if(m_Blocks[srcBlockIndex]->m_Allocations.empty())
14004  {
14005  // Finished: no more allocations to process.
14006  if(srcBlockIndex == srcBlockMinIndex)
14007  {
14008  return VK_SUCCESS;
14009  }
14010  else
14011  {
14012  --srcBlockIndex;
14013  srcAllocIndex = SIZE_MAX;
14014  }
14015  }
14016  else
14017  {
14018  srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1;
14019  }
14020  }
14021 
14022  BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex];
14023  AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex];
14024 
14025  const VkDeviceSize size = allocInfo.m_hAllocation->GetSize();
14026  const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset();
14027  const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment();
14028  const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType();
14029 
14030  // 2. Try to find new place for this allocation in preceding or current block.
14031  for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex)
14032  {
14033  BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex];
14034  VmaAllocationRequest dstAllocRequest;
14035  if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest(
14036  m_CurrentFrameIndex,
14037  m_pBlockVector->GetFrameInUseCount(),
14038  m_pBlockVector->GetBufferImageGranularity(),
14039  size,
14040  alignment,
14041  false, // upperAddress
14042  suballocType,
14043  false, // canMakeOtherLost
14044  strategy,
14045  &dstAllocRequest) &&
14046  MoveMakesSense(
14047  dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset))
14048  {
14049  VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0);
14050 
14051  // Reached limit on number of allocations or bytes to move.
14052  if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
14053  (m_BytesMoved + size > maxBytesToMove))
14054  {
14055  return VK_SUCCESS;
14056  }
14057 
14058  VmaDefragmentationMove move = {};
14059  move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
14060  move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
14061  move.srcOffset = srcOffset;
14062  move.dstOffset = dstAllocRequest.offset;
14063  move.size = size;
14064  move.hAllocation = allocInfo.m_hAllocation;
14065  move.pSrcBlock = pSrcBlockInfo->m_pBlock;
14066  move.pDstBlock = pDstBlockInfo->m_pBlock;
14067 
14068  moves.push_back(move);
14069 
14070  pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
14071  dstAllocRequest,
14072  suballocType,
14073  size,
14074  allocInfo.m_hAllocation);
14075 
14076  if(freeOldAllocations)
14077  {
14078  pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
14079  allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
14080  }
14081 
14082  if(allocInfo.m_pChanged != VMA_NULL)
14083  {
14084  *allocInfo.m_pChanged = VK_TRUE;
14085  }
14086 
14087  ++m_AllocationsMoved;
14088  m_BytesMoved += size;
14089 
14090  VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
14091 
14092  break;
14093  }
14094  }
14095 
14096  // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
14097 
14098  if(srcAllocIndex > 0)
14099  {
14100  --srcAllocIndex;
14101  }
14102  else
14103  {
14104  if(srcBlockIndex > 0)
14105  {
14106  --srcBlockIndex;
14107  srcAllocIndex = SIZE_MAX;
14108  }
14109  else
14110  {
14111  return VK_SUCCESS;
14112  }
14113  }
14114  }
14115 }
14116 
14117 size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
14118 {
14119  size_t result = 0;
14120  for(size_t i = 0; i < m_Blocks.size(); ++i)
14121  {
14122  if(m_Blocks[i]->m_HasNonMovableAllocations)
14123  {
14124  ++result;
14125  }
14126  }
14127  return result;
14128 }
14129 
14130 VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
14131  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14132  VkDeviceSize maxBytesToMove,
14133  uint32_t maxAllocationsToMove,
14134  VmaDefragmentationFlags flags)
14135 {
14136  if(!m_AllAllocations && m_AllocationCount == 0)
14137  {
14138  return VK_SUCCESS;
14139  }
14140 
14141  const size_t blockCount = m_Blocks.size();
14142  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14143  {
14144  BlockInfo* pBlockInfo = m_Blocks[blockIndex];
14145 
14146  if(m_AllAllocations)
14147  {
14148  VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
14149  for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
14150  it != pMetadata->m_Suballocations.end();
14151  ++it)
14152  {
14153  if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
14154  {
14155  AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
14156  pBlockInfo->m_Allocations.push_back(allocInfo);
14157  }
14158  }
14159  }
14160 
14161  pBlockInfo->CalcHasNonMovableAllocations();
14162 
14163  // This is a choice based on research.
14164  // Option 1:
14165  pBlockInfo->SortAllocationsByOffsetDescending();
14166  // Option 2:
14167  //pBlockInfo->SortAllocationsBySizeDescending();
14168  }
14169 
14170  // Sort m_Blocks this time by the main criterion: from most "destination" to most "source" blocks.
14171  VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
14172 
14173  // This is a choice based on research.
14174  const uint32_t roundCount = 2;
14175 
14176  // Execute defragmentation rounds (the main part).
14177  VkResult result = VK_SUCCESS;
14178  for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
14179  {
14180  result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
14181  }
14182 
14183  return result;
14184 }
14185 
14186 bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
14187  size_t dstBlockIndex, VkDeviceSize dstOffset,
14188  size_t srcBlockIndex, VkDeviceSize srcOffset)
14189 {
14190  if(dstBlockIndex < srcBlockIndex)
14191  {
14192  return true;
14193  }
14194  if(dstBlockIndex > srcBlockIndex)
14195  {
14196  return false;
14197  }
14198  if(dstOffset < srcOffset)
14199  {
14200  return true;
14201  }
14202  return false;
14203 }
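// Worked examples of the rule above (after sorting, lower block indices are
// more "destination", higher indices more "source"):
//
//   MoveMakesSense(dst=0, dstOfs=256, src=2, srcOfs=0)   -> true  (earlier block)
//   MoveMakesSense(dst=2, dstOfs=0,   src=0, srcOfs=256) -> false (later block)
//   MoveMakesSense(dst=1, dstOfs=64,  src=1, srcOfs=512) -> true  (same block, moves left)
//   MoveMakesSense(dst=1, dstOfs=512, src=1, srcOfs=64)  -> false (same block, would move right)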
14204 
14205 ////////////////////////////////////////////////////////////////////////////////
14206 // VmaDefragmentationAlgorithm_Fast
14207 
14208 VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
14209  VmaAllocator hAllocator,
14210  VmaBlockVector* pBlockVector,
14211  uint32_t currentFrameIndex,
14212  bool overlappingMoveSupported) :
14213  VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
14214  m_OverlappingMoveSupported(overlappingMoveSupported),
14215  m_AllocationCount(0),
14216  m_AllAllocations(false),
14217  m_BytesMoved(0),
14218  m_AllocationsMoved(0),
14219  m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
14220 {
14221  VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
14222 
14223 }
14224 
14225 VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
14226 {
14227 }
14228 
14229 VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
14230  VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
14231  VkDeviceSize maxBytesToMove,
14232  uint32_t maxAllocationsToMove,
14233  VmaDefragmentationFlags flags)
14234 {
14235  VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
14236 
14237  const size_t blockCount = m_pBlockVector->GetBlockCount();
14238  if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
14239  {
14240  return VK_SUCCESS;
14241  }
14242 
14243  PreprocessMetadata();
14244 
14245  // Sort blocks in order from most destination.
14246 
14247  m_BlockInfos.resize(blockCount);
14248  for(size_t i = 0; i < blockCount; ++i)
14249  {
14250  m_BlockInfos[i].origBlockIndex = i;
14251  }
14252 
14253  VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool {
14254  return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() <
14255  m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize();
14256  });
14257 
14258  // THE MAIN ALGORITHM
14259 
14260  FreeSpaceDatabase freeSpaceDb;
14261 
14262  size_t dstBlockInfoIndex = 0;
14263  size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14264  VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14265  VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14266  VkDeviceSize dstBlockSize = pDstMetadata->GetSize();
14267  VkDeviceSize dstOffset = 0;
14268 
14269  bool end = false;
14270  for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex)
14271  {
14272  const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex;
14273  VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex);
14274  VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata;
14275  for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin();
14276  !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); )
14277  {
14278  VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation;
14279  const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment();
14280  const VkDeviceSize srcAllocSize = srcSuballocIt->size;
14281  if(m_AllocationsMoved == maxAllocationsToMove ||
14282  m_BytesMoved + srcAllocSize > maxBytesToMove)
14283  {
14284  end = true;
14285  break;
14286  }
14287  const VkDeviceSize srcAllocOffset = srcSuballocIt->offset;
14288 
14289  VmaDefragmentationMove move = {};
14290  // Try to place it in one of free spaces from the database.
14291  size_t freeSpaceInfoIndex;
14292  VkDeviceSize dstAllocOffset;
14293  if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize,
14294  freeSpaceInfoIndex, dstAllocOffset))
14295  {
14296  size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex;
14297  VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex);
14298  VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata;
14299 
14300  // Same block
14301  if(freeSpaceInfoIndex == srcBlockInfoIndex)
14302  {
14303  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14304 
14305  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14306 
14307  VmaSuballocation suballoc = *srcSuballocIt;
14308  suballoc.offset = dstAllocOffset;
14309  suballoc.hAllocation->ChangeOffset(dstAllocOffset);
14310  m_BytesMoved += srcAllocSize;
14311  ++m_AllocationsMoved;
14312 
14313  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14314  ++nextSuballocIt;
14315  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14316  srcSuballocIt = nextSuballocIt;
14317 
14318  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14319 
14320  move.srcBlockIndex = srcOrigBlockIndex;
14321  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14322  move.srcOffset = srcAllocOffset;
14323  move.dstOffset = dstAllocOffset;
14324  move.size = srcAllocSize;
14325 
14326  moves.push_back(move);
14327  }
14328  // Different block
14329  else
14330  {
14331  // MOVE OPTION 2: Move the allocation to a different block.
14332 
14333  VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex);
14334 
14335  VmaSuballocation suballoc = *srcSuballocIt;
14336  suballoc.offset = dstAllocOffset;
14337  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset);
14338  m_BytesMoved += srcAllocSize;
14339  ++m_AllocationsMoved;
14340 
14341  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14342  ++nextSuballocIt;
14343  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14344  srcSuballocIt = nextSuballocIt;
14345 
14346  InsertSuballoc(pFreeSpaceMetadata, suballoc);
14347 
14348  move.srcBlockIndex = srcOrigBlockIndex;
14349  move.dstBlockIndex = freeSpaceOrigBlockIndex;
14350  move.srcOffset = srcAllocOffset;
14351  move.dstOffset = dstAllocOffset;
14352  move.size = srcAllocSize;
14353 
14354  moves.push_back(move);
14355  }
14356  }
14357  else
14358  {
14359  dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment);
14360 
14361  // If the allocation doesn't fit before the end of dstBlock, advance to the next block.
14362  while(dstBlockInfoIndex < srcBlockInfoIndex &&
14363  dstAllocOffset + srcAllocSize > dstBlockSize)
14364  {
14365  // But before that, register remaining free space at the end of dst block.
14366  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset);
14367 
14368  ++dstBlockInfoIndex;
14369  dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex;
14370  pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex);
14371  pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata;
14372  dstBlockSize = pDstMetadata->GetSize();
14373  dstOffset = 0;
14374  dstAllocOffset = 0;
14375  }
14376 
14377  // Same block
14378  if(dstBlockInfoIndex == srcBlockInfoIndex)
14379  {
14380  VMA_ASSERT(dstAllocOffset <= srcAllocOffset);
14381 
14382  const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset;
14383 
14384  bool skipOver = overlap;
14385  if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset)
14386  {
14387  // If the destination and source places overlap, skip the move if it would
14388  // shift the allocation by less than 1/64 of its size.
14389  skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize;
14390  }
14391 
14392  if(skipOver)
14393  {
14394  freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset);
14395 
14396  dstOffset = srcAllocOffset + srcAllocSize;
14397  ++srcSuballocIt;
14398  }
14399  // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset.
14400  else
14401  {
14402  srcSuballocIt->offset = dstAllocOffset;
14403  srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset);
14404  dstOffset = dstAllocOffset + srcAllocSize;
14405  m_BytesMoved += srcAllocSize;
14406  ++m_AllocationsMoved;
14407  ++srcSuballocIt;
14408 
14409  move.srcBlockIndex = srcOrigBlockIndex;
14410  move.dstBlockIndex = dstOrigBlockIndex;
14411  move.srcOffset = srcAllocOffset;
14412  move.dstOffset = dstAllocOffset;
14413  move.size = srcAllocSize;
14414 
14415  moves.push_back(move);
14416  }
14417  }
14418  // Different block
14419  else
14420  {
14421  // MOVE OPTION 2: Move the allocation to a different block.
14422 
14423  VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex);
14424  VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize);
14425 
14426  VmaSuballocation suballoc = *srcSuballocIt;
14427  suballoc.offset = dstAllocOffset;
14428  suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset);
14429  dstOffset = dstAllocOffset + srcAllocSize;
14430  m_BytesMoved += srcAllocSize;
14431  ++m_AllocationsMoved;
14432 
14433  VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt;
14434  ++nextSuballocIt;
14435  pSrcMetadata->m_Suballocations.erase(srcSuballocIt);
14436  srcSuballocIt = nextSuballocIt;
14437 
14438  pDstMetadata->m_Suballocations.push_back(suballoc);
14439 
14440  move.srcBlockIndex = srcOrigBlockIndex;
14441  move.dstBlockIndex = dstOrigBlockIndex;
14442  move.srcOffset = srcAllocOffset;
14443  move.dstOffset = dstAllocOffset;
14444  move.size = srcAllocSize;
14445 
14446  moves.push_back(move);
14447  }
14448  }
14449  }
14450  }
14451 
14452  m_BlockInfos.clear();
14453 
14454  PostprocessMetadata();
14455 
14456  return VK_SUCCESS;
14457 }
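// A worked illustration of the compaction performed above, assuming two equally
// sized blocks sorted so that index 0 (the "destination" end) has the least free space:
//
//   before:  block 0: [A]..[B].....     block 1: ..[C]....[D].
//   after:   block 0: [A][B][C][D]..    block 1: .............
//
// Allocations are visited in offset order and either slid toward offset 0 within
// their own block (MOVE OPTION 1) or appended to an earlier block (MOVE OPTION 2);
// free ranges that get skipped over are remembered in freeSpaceDb for later reuse.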
14458 
14459 void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata()
14460 {
14461  const size_t blockCount = m_pBlockVector->GetBlockCount();
14462  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14463  {
14464  VmaBlockMetadata_Generic* const pMetadata =
14465  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14466  pMetadata->m_FreeCount = 0;
14467  pMetadata->m_SumFreeSize = pMetadata->GetSize();
14468  pMetadata->m_FreeSuballocationsBySize.clear();
14469  for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14470  it != pMetadata->m_Suballocations.end(); )
14471  {
14472  if(it->type == VMA_SUBALLOCATION_TYPE_FREE)
14473  {
14474  VmaSuballocationList::iterator nextIt = it;
14475  ++nextIt;
14476  pMetadata->m_Suballocations.erase(it);
14477  it = nextIt;
14478  }
14479  else
14480  {
14481  ++it;
14482  }
14483  }
14484  }
14485 }
14486 
14487 void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata()
14488 {
14489  const size_t blockCount = m_pBlockVector->GetBlockCount();
14490  for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
14491  {
14492  VmaBlockMetadata_Generic* const pMetadata =
14493  (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata;
14494  const VkDeviceSize blockSize = pMetadata->GetSize();
14495 
14496  // No allocations in this block - entire area is free.
14497  if(pMetadata->m_Suballocations.empty())
14498  {
14499  pMetadata->m_FreeCount = 1;
14500  //pMetadata->m_SumFreeSize is already set to blockSize.
14501  VmaSuballocation suballoc = {
14502  0, // offset
14503  blockSize, // size
14504  VMA_NULL, // hAllocation
14505  VMA_SUBALLOCATION_TYPE_FREE };
14506  pMetadata->m_Suballocations.push_back(suballoc);
14507  pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin());
14508  }
14509  // There are some allocations in this block.
14510  else
14511  {
14512  VkDeviceSize offset = 0;
14513  VmaSuballocationList::iterator it;
14514  for(it = pMetadata->m_Suballocations.begin();
14515  it != pMetadata->m_Suballocations.end();
14516  ++it)
14517  {
14518  VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE);
14519  VMA_ASSERT(it->offset >= offset);
14520 
14521  // Need to insert preceding free space.
14522  if(it->offset > offset)
14523  {
14524  ++pMetadata->m_FreeCount;
14525  const VkDeviceSize freeSize = it->offset - offset;
14526  VmaSuballocation suballoc = {
14527  offset, // offset
14528  freeSize, // size
14529  VMA_NULL, // hAllocation
14530  VMA_SUBALLOCATION_TYPE_FREE };
14531  VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14532  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14533  {
14534  pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt);
14535  }
14536  }
14537 
14538  pMetadata->m_SumFreeSize -= it->size;
14539  offset = it->offset + it->size;
14540  }
14541 
14542  // Need to insert trailing free space.
14543  if(offset < blockSize)
14544  {
14545  ++pMetadata->m_FreeCount;
14546  const VkDeviceSize freeSize = blockSize - offset;
14547  VmaSuballocation suballoc = {
14548  offset, // offset
14549  freeSize, // size
14550  VMA_NULL, // hAllocation
14551  VMA_SUBALLOCATION_TYPE_FREE };
14552  VMA_ASSERT(it == pMetadata->m_Suballocations.end());
14553  VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
14554  if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
14555  {
14556  pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
14557  }
14558  }
14559 
14560  VMA_SORT(
14561  pMetadata->m_FreeSuballocationsBySize.begin(),
14562  pMetadata->m_FreeSuballocationsBySize.end(),
14563  VmaSuballocationItemSizeLess());
14564  }
14565 
14566  VMA_HEAVY_ASSERT(pMetadata->Validate());
14567  }
14568 }
14569 
14570 void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
14571 {
14572  // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
14573  VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
14574  // Advance to the first suballocation whose offset is not less than the new
14575  // one; inserting before it keeps the list sorted by offset.
14576  while(it != pMetadata->m_Suballocations.end() && it->offset < suballoc.offset)
14577  {
14578  ++it;
14579  }
14581  pMetadata->m_Suballocations.insert(it, suballoc);
14582 }
14583 
14584 ////////////////////////////////////////////////////////////////////////////////
14585 // VmaBlockVectorDefragmentationContext
14586 
14587 VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
14588  VmaAllocator hAllocator,
14589  VmaPool hCustomPool,
14590  VmaBlockVector* pBlockVector,
14591  uint32_t currFrameIndex) :
14592  res(VK_SUCCESS),
14593  mutexLocked(false),
14594  blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
14595  defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
14596  defragmentationMovesProcessed(0),
14597  defragmentationMovesCommitted(0),
14598  hasDefragmentationPlan(0),
14599  m_hAllocator(hAllocator),
14600  m_hCustomPool(hCustomPool),
14601  m_pBlockVector(pBlockVector),
14602  m_CurrFrameIndex(currFrameIndex),
14603  m_pAlgorithm(VMA_NULL),
14604  m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
14605  m_AllAllocations(false)
14606 {
14607 }
14608 
14609 VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
14610 {
14611  vma_delete(m_hAllocator, m_pAlgorithm);
14612 }
14613 
14614 void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
14615 {
14616  AllocInfo info = { hAlloc, pChanged };
14617  m_Allocations.push_back(info);
14618 }
14619 
14620 void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
14621 {
14622  const bool allAllocations = m_AllAllocations ||
14623  m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
14624 
14625  /********************************
14626  HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
14627  ********************************/
14628 
14629  /*
14630  Fast algorithm is supported only when certain criteria are met:
14631  - VMA_DEBUG_MARGIN is 0.
14632  - All allocations in this block vector are moveable.
14633  - There is no possibility of image/buffer granularity conflict.
14634  - The defragmentation is not incremental
14635  */
14636  if(VMA_DEBUG_MARGIN == 0 &&
14637  allAllocations &&
14638  !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
14639  !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
14640  {
14641  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
14642  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14643  }
14644  else
14645  {
14646  m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
14647  m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
14648  }
14649 
14650  if(allAllocations)
14651  {
14652  m_pAlgorithm->AddAll();
14653  }
14654  else
14655  {
14656  for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
14657  {
14658  m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
14659  }
14660  }
14661 }
14662 
14663 ////////////////////////////////////////////////////////////////////////////////
14664 // VmaDefragmentationContext
14665 
14666 VmaDefragmentationContext_T::VmaDefragmentationContext_T(
14667  VmaAllocator hAllocator,
14668  uint32_t currFrameIndex,
14669  uint32_t flags,
14670  VmaDefragmentationStats* pStats) :
14671  m_hAllocator(hAllocator),
14672  m_CurrFrameIndex(currFrameIndex),
14673  m_Flags(flags),
14674  m_pStats(pStats),
14675  m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
14676 {
14677  memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
14678 }
14679 
14680 VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
14681 {
14682  for(size_t i = m_CustomPoolContexts.size(); i--; )
14683  {
14684  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
14685  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14686  vma_delete(m_hAllocator, pBlockVectorCtx);
14687  }
14688  for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
14689  {
14690  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
14691  if(pBlockVectorCtx)
14692  {
14693  pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
14694  vma_delete(m_hAllocator, pBlockVectorCtx);
14695  }
14696  }
14697 }
14698 
14699 void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
14700 {
14701  for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
14702  {
14703  VmaPool pool = pPools[poolIndex];
14704  VMA_ASSERT(pool);
14705  // Pools with algorithm other than default are not defragmented.
14706  if(pool->m_BlockVector.GetAlgorithm() == 0)
14707  {
14708  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14709 
14710  for(size_t i = m_CustomPoolContexts.size(); i--; )
14711  {
14712  if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
14713  {
14714  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14715  break;
14716  }
14717  }
14718 
14719  if(!pBlockVectorDefragCtx)
14720  {
14721  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14722  m_hAllocator,
14723  pool,
14724  &pool->m_BlockVector,
14725  m_CurrFrameIndex);
14726  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14727  }
14728 
14729  pBlockVectorDefragCtx->AddAll();
14730  }
14731  }
14732 }
14733 
14734 void VmaDefragmentationContext_T::AddAllocations(
14735  uint32_t allocationCount,
14736  const VmaAllocation* pAllocations,
14737  VkBool32* pAllocationsChanged)
14738 {
14739  // Dispatch pAllocations among defragmentators. Create them when necessary.
14740  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
14741  {
14742  const VmaAllocation hAlloc = pAllocations[allocIndex];
14743  VMA_ASSERT(hAlloc);
14744  // DedicatedAlloc cannot be defragmented.
14745  if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
14746  // Lost allocation cannot be defragmented.
14747  (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
14748  {
14749  VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
14750 
14751  const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
14752  // This allocation belongs to custom pool.
14753  if(hAllocPool != VK_NULL_HANDLE)
14754  {
14755  // Pools with algorithm other than default are not defragmented.
14756  if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
14757  {
14758  for(size_t i = m_CustomPoolContexts.size(); i--; )
14759  {
14760  if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
14761  {
14762  pBlockVectorDefragCtx = m_CustomPoolContexts[i];
14763  break;
14764  }
14765  }
14766  if(!pBlockVectorDefragCtx)
14767  {
14768  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14769  m_hAllocator,
14770  hAllocPool,
14771  &hAllocPool->m_BlockVector,
14772  m_CurrFrameIndex);
14773  m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
14774  }
14775  }
14776  }
14777  // This allocation belongs to default pool.
14778  else
14779  {
14780  const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
14781  pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
14782  if(!pBlockVectorDefragCtx)
14783  {
14784  pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
14785  m_hAllocator,
14786  VMA_NULL, // hCustomPool
14787  m_hAllocator->m_pBlockVectors[memTypeIndex],
14788  m_CurrFrameIndex);
14789  m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
14790  }
14791  }
14792 
14793  if(pBlockVectorDefragCtx)
14794  {
14795  VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
14796  &pAllocationsChanged[allocIndex] : VMA_NULL;
14797  pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
14798  }
14799  }
14800  }
14801 }
14802 
14803 VkResult VmaDefragmentationContext_T::Defragment(
14804  VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
14805  VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
14806  VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
14807 {
14808  if(pStats)
14809  {
14810  memset(pStats, 0, sizeof(VmaDefragmentationStats));
14811  }
14812 
14813  if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
14814  {
14815  // For incremental defragmentations, we just earmark how much we can move.
14816  // The real work happens in the defragmentation passes.
14817  m_MaxCpuBytesToMove = maxCpuBytesToMove;
14818  m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
14819 
14820  m_MaxGpuBytesToMove = maxGpuBytesToMove;
14821  m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
14822 
14823  if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
14824  m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
14825  return VK_SUCCESS;
14826 
14827  return VK_NOT_READY;
14828  }
14829 
14830  if(commandBuffer == VK_NULL_HANDLE)
14831  {
14832  maxGpuBytesToMove = 0;
14833  maxGpuAllocationsToMove = 0;
14834  }
14835 
14836  VkResult res = VK_SUCCESS;
14837 
14838  // Process default pools.
14839  for(uint32_t memTypeIndex = 0;
14840  memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
14841  ++memTypeIndex)
14842  {
14843  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14844  if(pBlockVectorCtx)
14845  {
14846  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14847  pBlockVectorCtx->GetBlockVector()->Defragment(
14848  pBlockVectorCtx,
14849  pStats, flags,
14850  maxCpuBytesToMove, maxCpuAllocationsToMove,
14851  maxGpuBytesToMove, maxGpuAllocationsToMove,
14852  commandBuffer);
14853  if(pBlockVectorCtx->res != VK_SUCCESS)
14854  {
14855  res = pBlockVectorCtx->res;
14856  }
14857  }
14858  }
14859 
14860  // Process custom pools.
14861  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14862  customCtxIndex < customCtxCount && res >= VK_SUCCESS;
14863  ++customCtxIndex)
14864  {
14865  VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14866  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14867  pBlockVectorCtx->GetBlockVector()->Defragment(
14868  pBlockVectorCtx,
14869  pStats, flags,
14870  maxCpuBytesToMove, maxCpuAllocationsToMove,
14871  maxGpuBytesToMove, maxGpuAllocationsToMove,
14872  commandBuffer);
14873  if(pBlockVectorCtx->res != VK_SUCCESS)
14874  {
14875  res = pBlockVectorCtx->res;
14876  }
14877  }
14878 
14879  return res;
14880 }
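// A minimal sketch of the public API that reaches the function above, assuming a
// valid `allocator` and an array `allocs` of `allocCount` VmaAllocation handles;
// with no command buffer, only CPU-side (host-visible) moves are performed:
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
defragInfo.commandBuffer = VK_NULL_HANDLE; // no GPU-side moves

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VmaDefragmentationStats stats = {};
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, &stats, &defragCtx);
if(res >= VK_SUCCESS)
{
    res = vmaDefragmentationEnd(allocator, defragCtx);
}
*/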
14881 
14882 VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo)
14883 {
14884  VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves;
14885  uint32_t movesLeft = pInfo->moveCount;
14886 
14887  // Process default pools.
14888  for(uint32_t memTypeIndex = 0;
14889  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14890  ++memTypeIndex)
14891  {
14892  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14893  if(pBlockVectorCtx)
14894  {
14895  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14896 
14897  if(!pBlockVectorCtx->hasDefragmentationPlan)
14898  {
14899  pBlockVectorCtx->GetBlockVector()->Defragment(
14900  pBlockVectorCtx,
14901  m_pStats, m_Flags,
14902  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14903  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14904  VK_NULL_HANDLE);
14905 
14906  if(pBlockVectorCtx->res < VK_SUCCESS)
14907  continue;
14908 
14909  pBlockVectorCtx->hasDefragmentationPlan = true;
14910  }
14911 
14912  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14913  pBlockVectorCtx,
14914  pCurrentMove, movesLeft);
14915 
14916  movesLeft -= processed;
14917  pCurrentMove += processed;
14918  }
14919  }
14920 
14921  // Process custom pools.
14922  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14923  customCtxIndex < customCtxCount;
14924  ++customCtxIndex)
14925  {
14926  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14927  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14928 
14929  if(!pBlockVectorCtx->hasDefragmentationPlan)
14930  {
14931  pBlockVectorCtx->GetBlockVector()->Defragment(
14932  pBlockVectorCtx,
14933  m_pStats, m_Flags,
14934  m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove,
14935  m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove,
14936  VK_NULL_HANDLE);
14937 
14938  if(pBlockVectorCtx->res < VK_SUCCESS)
14939  continue;
14940 
14941  pBlockVectorCtx->hasDefragmentationPlan = true;
14942  }
14943 
14944  const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations(
14945  pBlockVectorCtx,
14946  pCurrentMove, movesLeft);
14947 
14948  movesLeft -= processed;
14949  pCurrentMove += processed;
14950  }
14951 
14952  pInfo->moveCount = pInfo->moveCount - movesLeft;
14953 
14954  return VK_SUCCESS;
14955 }
14956 VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
14957 {
14958  VkResult res = VK_SUCCESS;
14959 
14960  // Process default pools.
14961  for(uint32_t memTypeIndex = 0;
14962  memTypeIndex < m_hAllocator->GetMemoryTypeCount();
14963  ++memTypeIndex)
14964  {
14965  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
14966  if(pBlockVectorCtx)
14967  {
14968  VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
14969 
14970  if(!pBlockVectorCtx->hasDefragmentationPlan)
14971  {
14972  res = VK_NOT_READY;
14973  continue;
14974  }
14975 
14976  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14977  pBlockVectorCtx, m_pStats);
14978 
14979  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
14980  res = VK_NOT_READY;
14981  }
14982  }
14983 
14984  // Process custom pools.
14985  for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size();
14986  customCtxIndex < customCtxCount;
14987  ++customCtxIndex)
14988  {
14989  VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex];
14990  VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector());
14991 
14992  if(!pBlockVectorCtx->hasDefragmentationPlan)
14993  {
14994  res = VK_NOT_READY;
14995  continue;
14996  }
14997 
14998  pBlockVectorCtx->GetBlockVector()->CommitDefragmentations(
14999  pBlockVectorCtx, m_pStats);
15000 
15001  if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted)
15002  res = VK_NOT_READY;
15003  }
15004 
15005  return res;
15006 }
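// A minimal sketch of driving the two pass functions above through the public
// API, assuming `defragCtx` was created by vmaDefragmentationBegin() with
// VMA_DEFRAGMENTATION_FLAG_INCREMENTAL and `moves` has room for `maxMoves` entries:
/*
VkResult res;
do
{
    VmaDefragmentationPassInfo pass = {};
    pass.moveCount = maxMoves;
    pass.pMoves = moves;
    vmaBeginDefragmentationPass(allocator, defragCtx, &pass);

    // Recreate/copy the resources listed in pass.pMoves[0..pass.moveCount) here.

    res = vmaEndDefragmentationPass(allocator, defragCtx); // VK_NOT_READY = more passes
}
while(res == VK_NOT_READY);
*/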
15007 
15008 ////////////////////////////////////////////////////////////////////////////////
15009 // VmaRecorder
15010 
15011 #if VMA_RECORDING_ENABLED
15012 
15013 VmaRecorder::VmaRecorder() :
15014  m_UseMutex(true),
15015  m_Flags(0),
15016  m_File(VMA_NULL),
15017  m_RecordingStartTime(std::chrono::high_resolution_clock::now())
15018 {
15019 }
15020 
15021 VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex)
15022 {
15023  m_UseMutex = useMutex;
15024  m_Flags = settings.flags;
15025 
15026 #if defined(_WIN32)
15027  // Open file for writing.
15028  errno_t err = fopen_s(&m_File, settings.pFilePath, "wb");
15029 
15030  if(err != 0)
15031  {
15032  return VK_ERROR_INITIALIZATION_FAILED;
15033  }
15034 #else
15035  // Open file for writing.
15036  m_File = fopen(settings.pFilePath, "wb");
15037 
15038  if(m_File == 0)
15039  {
15040  return VK_ERROR_INITIALIZATION_FAILED;
15041  }
15042 #endif
15043 
15044  // Write header.
15045  fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording");
15046  fprintf(m_File, "%s\n", "1,8");
15047 
15048  return VK_SUCCESS;
15049 }
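// A minimal sketch of enabling this recorder through the public API, assuming
// the implementation was compiled with VMA_RECORDING_ENABLED set to 1:
/*
VmaRecordSettings recordSettings = {};
recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // flush after every call
recordSettings.pFilePath = "vma_calls.csv";

VmaAllocatorCreateInfo allocatorInfo = {};
// ... fill physicalDevice, device, instance, etc. ...
allocatorInfo.pRecordSettings = &recordSettings;

VmaAllocator allocator;
VkResult res = vmaCreateAllocator(&allocatorInfo, &allocator);
*/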
15050 
15051 VmaRecorder::~VmaRecorder()
15052 {
15053  if(m_File != VMA_NULL)
15054  {
15055  fclose(m_File);
15056  }
15057 }
15058 
15059 void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex)
15060 {
15061  CallParams callParams;
15062  GetBasicParams(callParams);
15063 
15064  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15065  fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex);
15066  Flush();
15067 }
15068 
15069 void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex)
15070 {
15071  CallParams callParams;
15072  GetBasicParams(callParams);
15073 
15074  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15075  fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex);
15076  Flush();
15077 }
15078 
15079 void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool)
15080 {
15081  CallParams callParams;
15082  GetBasicParams(callParams);
15083 
15084  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15085  fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex,
15086  createInfo.memoryTypeIndex,
15087  createInfo.flags,
15088  createInfo.blockSize,
15089  (uint64_t)createInfo.minBlockCount,
15090  (uint64_t)createInfo.maxBlockCount,
15091  createInfo.frameInUseCount,
15092  pool);
15093  Flush();
15094 }
15095 
15096 void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool)
15097 {
15098  CallParams callParams;
15099  GetBasicParams(callParams);
15100 
15101  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15102  fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex,
15103  pool);
15104  Flush();
15105 }
15106 
15107 void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex,
15108  const VkMemoryRequirements& vkMemReq,
15109  const VmaAllocationCreateInfo& createInfo,
15110  VmaAllocation allocation)
15111 {
15112  CallParams callParams;
15113  GetBasicParams(callParams);
15114 
15115  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15116  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15117  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15118  vkMemReq.size,
15119  vkMemReq.alignment,
15120  vkMemReq.memoryTypeBits,
15121  createInfo.flags,
15122  createInfo.usage,
15123  createInfo.requiredFlags,
15124  createInfo.preferredFlags,
15125  createInfo.memoryTypeBits,
15126  createInfo.pool,
15127  allocation,
15128  userDataStr.GetString());
15129  Flush();
15130 }
15131 
15132 void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex,
15133  const VkMemoryRequirements& vkMemReq,
15134  const VmaAllocationCreateInfo& createInfo,
15135  uint64_t allocationCount,
15136  const VmaAllocation* pAllocations)
15137 {
15138  CallParams callParams;
15139  GetBasicParams(callParams);
15140 
15141  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15142  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15143  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex,
15144  vkMemReq.size,
15145  vkMemReq.alignment,
15146  vkMemReq.memoryTypeBits,
15147  createInfo.flags,
15148  createInfo.usage,
15149  createInfo.requiredFlags,
15150  createInfo.preferredFlags,
15151  createInfo.memoryTypeBits,
15152  createInfo.pool);
15153  PrintPointerList(allocationCount, pAllocations);
15154  fprintf(m_File, ",%s\n", userDataStr.GetString());
15155  Flush();
15156 }
15157 
15158 void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex,
15159  const VkMemoryRequirements& vkMemReq,
15160  bool requiresDedicatedAllocation,
15161  bool prefersDedicatedAllocation,
15162  const VmaAllocationCreateInfo& createInfo,
15163  VmaAllocation allocation)
15164 {
15165  CallParams callParams;
15166  GetBasicParams(callParams);
15167 
15168  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15169  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15170  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15171  vkMemReq.size,
15172  vkMemReq.alignment,
15173  vkMemReq.memoryTypeBits,
15174  requiresDedicatedAllocation ? 1 : 0,
15175  prefersDedicatedAllocation ? 1 : 0,
15176  createInfo.flags,
15177  createInfo.usage,
15178  createInfo.requiredFlags,
15179  createInfo.preferredFlags,
15180  createInfo.memoryTypeBits,
15181  createInfo.pool,
15182  allocation,
15183  userDataStr.GetString());
15184  Flush();
15185 }
15186 
15187 void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex,
15188  const VkMemoryRequirements& vkMemReq,
15189  bool requiresDedicatedAllocation,
15190  bool prefersDedicatedAllocation,
15191  const VmaAllocationCreateInfo& createInfo,
15192  VmaAllocation allocation)
15193 {
15194  CallParams callParams;
15195  GetBasicParams(callParams);
15196 
15197  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15198  UserDataString userDataStr(createInfo.flags, createInfo.pUserData);
15199  fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15200  vkMemReq.size,
15201  vkMemReq.alignment,
15202  vkMemReq.memoryTypeBits,
15203  requiresDedicatedAllocation ? 1 : 0,
15204  prefersDedicatedAllocation ? 1 : 0,
15205  createInfo.flags,
15206  createInfo.usage,
15207  createInfo.requiredFlags,
15208  createInfo.preferredFlags,
15209  createInfo.memoryTypeBits,
15210  createInfo.pool,
15211  allocation,
15212  userDataStr.GetString());
15213  Flush();
15214 }
15215 
15216 void VmaRecorder::RecordFreeMemory(uint32_t frameIndex,
15217  VmaAllocation allocation)
15218 {
15219  CallParams callParams;
15220  GetBasicParams(callParams);
15221 
15222  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15223  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15224  allocation);
15225  Flush();
15226 }
15227 
15228 void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex,
15229  uint64_t allocationCount,
15230  const VmaAllocation* pAllocations)
15231 {
15232  CallParams callParams;
15233  GetBasicParams(callParams);
15234 
15235  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15236  fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex);
15237  PrintPointerList(allocationCount, pAllocations);
15238  fprintf(m_File, "\n");
15239  Flush();
15240 }
15241 
15242 void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex,
15243  VmaAllocation allocation,
15244  const void* pUserData)
15245 {
15246  CallParams callParams;
15247  GetBasicParams(callParams);
15248 
15249  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15250  UserDataString userDataStr(
15251  allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0,
15252  pUserData);
15253  fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15254  allocation,
15255  userDataStr.GetString());
15256  Flush();
15257 }
15258 
15259 void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex,
15260  VmaAllocation allocation)
15261 {
15262  CallParams callParams;
15263  GetBasicParams(callParams);
15264 
15265  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15266  fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15267  allocation);
15268  Flush();
15269 }
15270 
15271 void VmaRecorder::RecordMapMemory(uint32_t frameIndex,
15272  VmaAllocation allocation)
15273 {
15274  CallParams callParams;
15275  GetBasicParams(callParams);
15276 
15277  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15278  fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15279  allocation);
15280  Flush();
15281 }
15282 
15283 void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex,
15284  VmaAllocation allocation)
15285 {
15286  CallParams callParams;
15287  GetBasicParams(callParams);
15288 
15289  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15290  fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex,
15291  allocation);
15292  Flush();
15293 }
15294 
15295 void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex,
15296  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15297 {
15298  CallParams callParams;
15299  GetBasicParams(callParams);
15300 
15301  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15302  fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15303  allocation,
15304  offset,
15305  size);
15306  Flush();
15307 }
15308 
15309 void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex,
15310  VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
15311 {
15312  CallParams callParams;
15313  GetBasicParams(callParams);
15314 
15315  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15316  fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex,
15317  allocation,
15318  offset,
15319  size);
15320  Flush();
15321 }
15322 
15323 void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex,
15324  const VkBufferCreateInfo& bufCreateInfo,
15325  const VmaAllocationCreateInfo& allocCreateInfo,
15326  VmaAllocation allocation)
15327 {
15328  CallParams callParams;
15329  GetBasicParams(callParams);
15330 
15331  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15332  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15333  fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15334  bufCreateInfo.flags,
15335  bufCreateInfo.size,
15336  bufCreateInfo.usage,
15337  bufCreateInfo.sharingMode,
15338  allocCreateInfo.flags,
15339  allocCreateInfo.usage,
15340  allocCreateInfo.requiredFlags,
15341  allocCreateInfo.preferredFlags,
15342  allocCreateInfo.memoryTypeBits,
15343  allocCreateInfo.pool,
15344  allocation,
15345  userDataStr.GetString());
15346  Flush();
15347 }
15348 
15349 void VmaRecorder::RecordCreateImage(uint32_t frameIndex,
15350  const VkImageCreateInfo& imageCreateInfo,
15351  const VmaAllocationCreateInfo& allocCreateInfo,
15352  VmaAllocation allocation)
15353 {
15354  CallParams callParams;
15355  GetBasicParams(callParams);
15356 
15357  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15358  UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData);
15359  fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15360  imageCreateInfo.flags,
15361  imageCreateInfo.imageType,
15362  imageCreateInfo.format,
15363  imageCreateInfo.extent.width,
15364  imageCreateInfo.extent.height,
15365  imageCreateInfo.extent.depth,
15366  imageCreateInfo.mipLevels,
15367  imageCreateInfo.arrayLayers,
15368  imageCreateInfo.samples,
15369  imageCreateInfo.tiling,
15370  imageCreateInfo.usage,
15371  imageCreateInfo.sharingMode,
15372  imageCreateInfo.initialLayout,
15373  allocCreateInfo.flags,
15374  allocCreateInfo.usage,
15375  allocCreateInfo.requiredFlags,
15376  allocCreateInfo.preferredFlags,
15377  allocCreateInfo.memoryTypeBits,
15378  allocCreateInfo.pool,
15379  allocation,
15380  userDataStr.GetString());
15381  Flush();
15382 }
15383 
15384 void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex,
15385  VmaAllocation allocation)
15386 {
15387  CallParams callParams;
15388  GetBasicParams(callParams);
15389 
15390  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15391  fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex,
15392  allocation);
15393  Flush();
15394 }
15395 
15396 void VmaRecorder::RecordDestroyImage(uint32_t frameIndex,
15397  VmaAllocation allocation)
15398 {
15399  CallParams callParams;
15400  GetBasicParams(callParams);
15401 
15402  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15403  fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex,
15404  allocation);
15405  Flush();
15406 }
15407 
15408 void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex,
15409  VmaAllocation allocation)
15410 {
15411  CallParams callParams;
15412  GetBasicParams(callParams);
15413 
15414  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15415  fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex,
15416  allocation);
15417  Flush();
15418 }
15419 
15420 void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex,
15421  VmaAllocation allocation)
15422 {
15423  CallParams callParams;
15424  GetBasicParams(callParams);
15425 
15426  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15427  fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex,
15428  allocation);
15429  Flush();
15430 }
15431 
15432 void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex,
15433  VmaPool pool)
15434 {
15435  CallParams callParams;
15436  GetBasicParams(callParams);
15437 
15438  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15439  fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex,
15440  pool);
15441  Flush();
15442 }
15443 
15444 void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex,
15445  const VmaDefragmentationInfo2& info,
15446  VmaDefragmentationContext ctx)
15447 {
15448  CallParams callParams;
15449  GetBasicParams(callParams);
15450 
15451  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15452  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex,
15453  info.flags);
15454  PrintPointerList(info.allocationCount, info.pAllocations);
15455  fprintf(m_File, ",");
15456  PrintPointerList(info.poolCount, info.pPools);
15457  fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n",
15458  info.maxCpuBytesToMove,
15459  info.maxCpuAllocationsToMove,
15460  info.maxGpuBytesToMove,
15461  info.maxGpuAllocationsToMove,
15462  info.commandBuffer,
15463  ctx);
15464  Flush();
15465 }
15466 
15467 void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex,
15468  VmaDefragmentationContext ctx)
15469 {
15470  CallParams callParams;
15471  GetBasicParams(callParams);
15472 
15473  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15474  fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex,
15475  ctx);
15476  Flush();
15477 }
15478 
15479 void VmaRecorder::RecordSetPoolName(uint32_t frameIndex,
15480  VmaPool pool,
15481  const char* name)
15482 {
15483  CallParams callParams;
15484  GetBasicParams(callParams);
15485 
15486  VmaMutexLock lock(m_FileMutex, m_UseMutex);
15487  fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex,
15488  pool, name != VMA_NULL ? name : "");
15489  Flush();
15490 }
15491 
15492 VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData)
15493 {
15494  if(pUserData != VMA_NULL)
15495  {
15496  if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0)
15497  {
15498  m_Str = (const char*)pUserData;
15499  }
15500  else
15501  {
15502  // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, pUserData is an opaque pointer: print its value as a string and store that.
15503  snprintf(m_PtrStr, 17, "%p", pUserData);
15504  m_Str = m_PtrStr;
15505  }
15506  }
15507  else
15508  {
15509  m_Str = "";
15510  }
15511 }
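
// Editor's sketch (not part of the original source): how the two pUserData
// encodings above surface through the public API. Handles and the string
// literal are hypothetical.
static void ExampleSetUserData(VmaAllocator allocator, VmaAllocation alloc)
{
    // Default mode: the recorder logs only the pointer value ("%p").
    static int g_objectId = 42;
    vmaSetAllocationUserData(allocator, alloc, &g_objectId);
    // String mode: if the allocation was created with
    // VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, the characters
    // themselves end up in the recording, as implemented above.
    vmaSetAllocationUserData(allocator, alloc, (void*)"My texture");
}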
15512 
15513 void VmaRecorder::WriteConfiguration(
15514  const VkPhysicalDeviceProperties& devProps,
15515  const VkPhysicalDeviceMemoryProperties& memProps,
15516  uint32_t vulkanApiVersion,
15517  bool dedicatedAllocationExtensionEnabled,
15518  bool bindMemory2ExtensionEnabled,
15519  bool memoryBudgetExtensionEnabled,
15520  bool deviceCoherentMemoryExtensionEnabled)
15521 {
15522  fprintf(m_File, "Config,Begin\n");
15523 
15524  fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion));
15525 
15526  fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion);
15527  fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion);
15528  fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID);
15529  fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID);
15530  fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType);
15531  fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName);
15532 
15533  fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount);
15534  fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity);
15535  fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize);
15536 
15537  fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount);
15538  for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i)
15539  {
15540  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size);
15541  fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags);
15542  }
15543  fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount);
15544  for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
15545  {
15546  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex);
15547  fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags);
15548  }
15549 
15550  fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0);
15551  fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0);
15552  fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0);
15553  fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0);
15554 
15555  fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0);
15556  fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT);
15557  fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN);
15558  fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0);
15559  fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0);
15560  fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0);
15561  fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY);
15562  fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE);
15563  fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15564 
15565  fprintf(m_File, "Config,End\n");
15566 }
15567 
15568 void VmaRecorder::GetBasicParams(CallParams& outParams)
15569 {
15570  #if defined(_WIN32)
15571  outParams.threadId = GetCurrentThreadId();
15572  #else
15573  // Use C++11 features to get thread id and convert it to uint32_t.
15574  // There is room for optimization since sstream is quite slow.
15575  // Is there a better way to convert std::this_thread::get_id() to uint32_t?
15576  std::thread::id thread_id = std::this_thread::get_id();
15577  std::stringstream thread_id_to_string_converter;
15578  thread_id_to_string_converter << thread_id;
15579  std::string thread_id_as_string = thread_id_to_string_converter.str();
15580  outParams.threadId = static_cast<uint32_t>(std::stoi(thread_id_as_string.c_str()));
15581  #endif
15582 
15583  auto current_time = std::chrono::high_resolution_clock::now();
15584 
15585  outParams.time = std::chrono::duration<double, std::chrono::seconds::period>(current_time - m_RecordingStartTime).count();
15586 }
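
// Editor's sketch: a cheaper, equally portable way to obtain a 32-bit thread
// id than the stringstream round-trip above, using std::hash (requires
// <functional>). Not used by VMA; shown only as the optimization the comment
// above asks about.
static uint32_t ExampleThreadIdAsUint32()
{
    const size_t h = std::hash<std::thread::id>()(std::this_thread::get_id());
    return static_cast<uint32_t>(h); // Truncation is acceptable for a log identifier.
}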
15587 
15588 void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
15589 {
15590  if(count)
15591  {
15592  fprintf(m_File, "%p", pItems[0]);
15593  for(uint64_t i = 1; i < count; ++i)
15594  {
15595  fprintf(m_File, " %p", pItems[i]);
15596  }
15597  }
15598 }
15599 
15600 void VmaRecorder::Flush()
15601 {
15602  if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
15603  {
15604  fflush(m_File);
15605  }
15606 }
15607 
15608 #endif // #if VMA_RECORDING_ENABLED
15609 
15610 ////////////////////////////////////////////////////////////////////////////////
15611 // VmaAllocationObjectAllocator
15612 
15613 VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
15614  m_Allocator(pAllocationCallbacks, 1024)
15615 {
15616 }
15617 
15618 template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
15619 {
15620  VmaMutexLock mutexLock(m_Mutex);
15621  return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
15622 }
15623 
15624 void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
15625 {
15626  VmaMutexLock mutexLock(m_Mutex);
15627  m_Allocator.Free(hAlloc);
15628 }
15629 
15630 ////////////////////////////////////////////////////////////////////////////////
15631 // VmaAllocator_T
15632 
15633 VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
15634  m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
15635  m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
15636  m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
15637  m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
15638  m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
15639  m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
15640  m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
15641  m_hDevice(pCreateInfo->device),
15642  m_hInstance(pCreateInfo->instance),
15643  m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
15644  m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
15645  *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
15646  m_AllocationObjectAllocator(&m_AllocationCallbacks),
15647  m_HeapSizeLimitMask(0),
15648  m_PreferredLargeHeapBlockSize(0),
15649  m_PhysicalDevice(pCreateInfo->physicalDevice),
15650  m_CurrentFrameIndex(0),
15651  m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
15652  m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
15653  m_NextPoolId(0),
15654  m_GlobalMemoryTypeBits(UINT32_MAX)
15655 #if VMA_RECORDING_ENABLED
15656  ,m_pRecorder(VMA_NULL)
15657 #endif
15658 {
15659  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15660  {
15661  m_UseKhrDedicatedAllocation = false;
15662  m_UseKhrBindMemory2 = false;
15663  }
15664 
15665  if(VMA_DEBUG_DETECT_CORRUPTION)
15666  {
15667  // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
15668  VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
15669  }
15670 
15671  VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
15672 
15673  if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
15674  {
15675 #if !(VMA_DEDICATED_ALLOCATION)
15676  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
15677  {
15678  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
15679  }
15680 #endif
15681 #if !(VMA_BIND_MEMORY2)
15682  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
15683  {
15684  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
15685  }
15686 #endif
15687  }
15688 #if !(VMA_MEMORY_BUDGET)
15689  if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
15690  {
15691  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
15692  }
15693 #endif
15694 #if !(VMA_BUFFER_DEVICE_ADDRESS)
15695  if(m_UseKhrBufferDeviceAddress)
15696  {
15697  VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
15698  }
15699 #endif
15700 #if VMA_VULKAN_VERSION < 1002000
15701  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
15702  {
15703  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
15704  }
15705 #endif
15706 #if VMA_VULKAN_VERSION < 1001000
15707  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15708  {
15709  VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
15710  }
15711 #endif
15712 
15713  memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
15714  memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
15715  memset(&m_MemProps, 0, sizeof(m_MemProps));
15716 
15717  memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
15718  memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
15719  memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
15720 
15721  if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
15722  {
15723  m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
15724  m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
15725  m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
15726  }
15727 
15728  ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
15729 
15730  (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
15731  (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
15732 
15733  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
15734  VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
15735  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
15736  VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
15737 
15738  m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
15739  pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
15740 
15741  m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
15742 
15743  if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
15744  {
15745  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
15746  {
15747  const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
15748  if(limit != VK_WHOLE_SIZE)
15749  {
15750  m_HeapSizeLimitMask |= 1u << heapIndex;
15751  if(limit < m_MemProps.memoryHeaps[heapIndex].size)
15752  {
15753  m_MemProps.memoryHeaps[heapIndex].size = limit;
15754  }
15755  }
15756  }
15757  }
15758 
15759  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
15760  {
15761  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
15762 
15763  m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
15764  this,
15765  VK_NULL_HANDLE, // hParentPool
15766  memTypeIndex,
15767  preferredBlockSize,
15768  0,
15769  SIZE_MAX,
15770  GetBufferImageGranularity(),
15771  pCreateInfo->frameInUseCount,
15772  false, // explicitBlockSize
15773  false); // linearAlgorithm
15774  // No need to call m_pBlockVectors[memTypeIndex]->CreateMinBlocks here,
15775  // because minBlockCount is 0.
15776  m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
15777 
15778  }
15779 }
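
// Editor's sketch: how an application feeds the pHeapSizeLimit array consumed
// by the constructor above. Heap index and size are hypothetical; the array
// only needs to stay alive until vmaCreateAllocator returns.
static void ExampleLimitHeapSize(VmaAllocatorCreateInfo& createInfo,
    VkDeviceSize heapSizeLimits[VK_MAX_MEMORY_HEAPS])
{
    for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
        heapSizeLimits[i] = VK_WHOLE_SIZE; // VK_WHOLE_SIZE means "no limit".
    heapSizeLimits[0] = 512ull * 1024 * 1024; // Cap heap 0 at 512 MiB.
    createInfo.pHeapSizeLimit = heapSizeLimits;
}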
15780 
15781 VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
15782 {
15783  VkResult res = VK_SUCCESS;
15784 
15785  if(pCreateInfo->pRecordSettings != VMA_NULL &&
15786  !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
15787  {
15788 #if VMA_RECORDING_ENABLED
15789  m_pRecorder = vma_new(this, VmaRecorder)();
15790  res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
15791  if(res != VK_SUCCESS)
15792  {
15793  return res;
15794  }
15795  m_pRecorder->WriteConfiguration(
15796  m_PhysicalDeviceProperties,
15797  m_MemProps,
15798  m_VulkanApiVersion,
15799  m_UseKhrDedicatedAllocation,
15800  m_UseKhrBindMemory2,
15801  m_UseExtMemoryBudget,
15802  m_UseAmdDeviceCoherentMemory);
15803  m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
15804 #else
15805  VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
15806  return VK_ERROR_FEATURE_NOT_PRESENT;
15807 #endif
15808  }
15809 
15810 #if VMA_MEMORY_BUDGET
15811  if(m_UseExtMemoryBudget)
15812  {
15813  UpdateVulkanBudget();
15814  }
15815 #endif // #if VMA_MEMORY_BUDGET
15816 
15817  return res;
15818 }
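
// Editor's sketch: enabling the recording path handled by Init() above.
// Requires building with VMA_RECORDING_ENABLED defined to 1; the file path
// is hypothetical.
static void ExampleEnableRecording(VmaAllocatorCreateInfo& createInfo,
    VmaRecordSettings& recordSettings)
{
    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT; // Flush() then calls fflush after every line.
    recordSettings.pFilePath = "vma_capture.csv";
    createInfo.pRecordSettings = &recordSettings;
}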
15819 
15820 VmaAllocator_T::~VmaAllocator_T()
15821 {
15822 #if VMA_RECORDING_ENABLED
15823  if(m_pRecorder != VMA_NULL)
15824  {
15825  m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
15826  vma_delete(this, m_pRecorder);
15827  }
15828 #endif
15829 
15830  VMA_ASSERT(m_Pools.empty());
15831 
15832  for(size_t i = GetMemoryTypeCount(); i--; )
15833  {
15834  if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
15835  {
15836  VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
15837  }
15838 
15839  vma_delete(this, m_pDedicatedAllocations[i]);
15840  vma_delete(this, m_pBlockVectors[i]);
15841  }
15842 }
15843 
15844 void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
15845 {
15846 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15847  ImportVulkanFunctions_Static();
15848 #endif
15849 
15850  if(pVulkanFunctions != VMA_NULL)
15851  {
15852  ImportVulkanFunctions_Custom(pVulkanFunctions);
15853  }
15854 
15855 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15856  ImportVulkanFunctions_Dynamic();
15857 #endif
15858 
15859  ValidateVulkanFunctions();
15860 }
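
// Editor's sketch: supplying explicit entry points, consumed by
// ImportVulkanFunctions_Custom() above. Only two members are shown; any
// member left null can still be filled by the static or dynamic import step.
// Assumes the Vulkan loader's static prototypes are linked in.
static void ExampleCustomVulkanFunctions(VmaAllocatorCreateInfo& createInfo,
    VmaVulkanFunctions& funcs)
{
    funcs.vkAllocateMemory = vkAllocateMemory;
    funcs.vkFreeMemory = vkFreeMemory;
    createInfo.pVulkanFunctions = &funcs;
}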
15861 
15862 #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15863 
15864 void VmaAllocator_T::ImportVulkanFunctions_Static()
15865 {
15866  // Vulkan 1.0
15867  m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
15868  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
15869  m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
15870  m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
15871  m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
15872  m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
15873  m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
15874  m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
15875  m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
15876  m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
15877  m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
15878  m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
15879  m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
15880  m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
15881  m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
15882  m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
15883  m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
15884 
15885  // Vulkan 1.1
15886 #if VMA_VULKAN_VERSION >= 1001000
15887  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15888  {
15889  m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
15890  m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
15891  m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
15892  m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
15893  m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
15894  }
15895 #endif
15896 }
15897 
15898 #endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1
15899 
15900 void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
15901 {
15902  VMA_ASSERT(pVulkanFunctions != VMA_NULL);
15903 
15904 #define VMA_COPY_IF_NOT_NULL(funcName) \
15905  if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
15906 
15907  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
15908  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
15909  VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
15910  VMA_COPY_IF_NOT_NULL(vkFreeMemory);
15911  VMA_COPY_IF_NOT_NULL(vkMapMemory);
15912  VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
15913  VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
15914  VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
15915  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
15916  VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
15917  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
15918  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
15919  VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
15920  VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
15921  VMA_COPY_IF_NOT_NULL(vkCreateImage);
15922  VMA_COPY_IF_NOT_NULL(vkDestroyImage);
15923  VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
15924 
15925 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
15926  VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
15927  VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
15928 #endif
15929 
15930 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
15931  VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
15932  VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
15933 #endif
15934 
15935 #if VMA_MEMORY_BUDGET
15936  VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
15937 #endif
15938 
15939 #undef VMA_COPY_IF_NOT_NULL
15940 }
15941 
15942 #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
15943 
15944 void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
15945 {
15946 #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
15947  if(m_VulkanFunctions.memberName == VMA_NULL) \
15948  m_VulkanFunctions.memberName = \
15949  (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString);
15950 #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
15951  if(m_VulkanFunctions.memberName == VMA_NULL) \
15952  m_VulkanFunctions.memberName = \
15953  (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString);
15954 
15955  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
15956  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
15957  VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
15958  VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
15959  VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
15960  VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
15961  VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
15962  VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
15963  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
15964  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
15965  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
15966  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
15967  VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
15968  VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
15969  VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
15970  VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
15971  VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
15972 
15973 #if VMA_VULKAN_VERSION >= 1001000
15974  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
15975  {
15976  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
15977  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
15978  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
15979  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
15980  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
15981  }
15982 #endif
15983 
15984 #if VMA_DEDICATED_ALLOCATION
15985  if(m_UseKhrDedicatedAllocation)
15986  {
15987  VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
15988  VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
15989  }
15990 #endif
15991 
15992 #if VMA_BIND_MEMORY2
15993  if(m_UseKhrBindMemory2)
15994  {
15995  VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
15996  VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
15997  }
15998 #endif // #if VMA_BIND_MEMORY2
15999 
16000 #if VMA_MEMORY_BUDGET
16001  if(m_UseExtMemoryBudget)
16002  {
16003  VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
16004  }
16005 #endif // #if VMA_MEMORY_BUDGET
16006 
16007 #undef VMA_FETCH_DEVICE_FUNC
16008 #undef VMA_FETCH_INSTANCE_FUNC
16009 }
16010 
16011 #endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
16012 
16013 void VmaAllocator_T::ValidateVulkanFunctions()
16014 {
16015  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
16016  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
16017  VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
16018  VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
16019  VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
16020  VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
16021  VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
16022  VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
16023  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
16024  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
16025  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
16026  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
16027  VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
16028  VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
16029  VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
16030  VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
16031  VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
16032 
16033 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16034  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
16035  {
16036  VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
16037  VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
16038  }
16039 #endif
16040 
16041 #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
16042  if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
16043  {
16044  VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
16045  VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
16046  }
16047 #endif
16048 
16049 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
16050  if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16051  {
16052  VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
16053  }
16054 #endif
16055 }
16056 
16057 VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
16058 {
16059  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16060  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
16061  const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
16062  return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
16063 }
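// Worked example (editor's note): with the default VMA_SMALL_HEAP_MAX_SIZE of
// 1 GiB and VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE of 256 MiB, a 256 MiB heap is
// "small", so its preferred block size is 256 MiB / 8 = 32 MiB, while an
// 8 GiB heap gets the full 256 MiB; both results are aligned up to 32 bytes.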
16064 
16065 VkResult VmaAllocator_T::AllocateMemoryOfType(
16066  VkDeviceSize size,
16067  VkDeviceSize alignment,
16068  bool dedicatedAllocation,
16069  VkBuffer dedicatedBuffer,
16070  VkBufferUsageFlags dedicatedBufferUsage,
16071  VkImage dedicatedImage,
16072  const VmaAllocationCreateInfo& createInfo,
16073  uint32_t memTypeIndex,
16074  VmaSuballocationType suballocType,
16075  size_t allocationCount,
16076  VmaAllocation* pAllocations)
16077 {
16078  VMA_ASSERT(pAllocations != VMA_NULL);
16079  VMA_DEBUG_LOG(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
16080 
16081  VmaAllocationCreateInfo finalCreateInfo = createInfo;
16082 
16083  // If memory type is not HOST_VISIBLE, disable MAPPED.
16084  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16085  (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16086  {
16087  finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16088  }
16089  // If memory is lazily allocated, it should always be dedicated.
16090  if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
16091  {
16092  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16093  }
16094 
16095  VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
16096  VMA_ASSERT(blockVector);
16097 
16098  const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
16099  bool preferDedicatedMemory =
16100  VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
16101  dedicatedAllocation ||
16102  // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
16103  size > preferredBlockSize / 2;
16104 
16105  if(preferDedicatedMemory &&
16106  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
16107  finalCreateInfo.pool == VK_NULL_HANDLE)
16108  {
16109  finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
16110  }
16111 
16112  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
16113  {
16114  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16115  {
16116  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16117  }
16118  else
16119  {
16120  return AllocateDedicatedMemory(
16121  size,
16122  suballocType,
16123  memTypeIndex,
16124  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16125  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16126  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16127  finalCreateInfo.pUserData,
16128  dedicatedBuffer,
16129  dedicatedBufferUsage,
16130  dedicatedImage,
16131  allocationCount,
16132  pAllocations);
16133  }
16134  }
16135  else
16136  {
16137  VkResult res = blockVector->Allocate(
16138  m_CurrentFrameIndex.load(),
16139  size,
16140  alignment,
16141  finalCreateInfo,
16142  suballocType,
16143  allocationCount,
16144  pAllocations);
16145  if(res == VK_SUCCESS)
16146  {
16147  return res;
16148  }
16149 
16150  // Block vector allocation failed: try dedicated memory.
16151  if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16152  {
16153  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16154  }
16155  else
16156  {
16157  res = AllocateDedicatedMemory(
16158  size,
16159  suballocType,
16160  memTypeIndex,
16161  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
16162  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
16163  (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
16164  finalCreateInfo.pUserData,
16165  dedicatedBuffer,
16166  dedicatedBufferUsage,
16167  dedicatedImage,
16168  allocationCount,
16169  pAllocations);
16170  if(res == VK_SUCCESS)
16171  {
16172  // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
16173  VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
16174  return VK_SUCCESS;
16175  }
16176  else
16177  {
16178  // Everything failed: Return error code.
16179  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16180  return res;
16181  }
16182  }
16183  }
16184 }
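
// Editor's sketch: forcing the dedicated branch of AllocateMemoryOfType()
// through the public API. Buffer size and usage are hypothetical.
static VkResult ExampleCreateDedicatedBuffer(VmaAllocator allocator,
    VkBuffer* pBuffer, VmaAllocation* pAllocation)
{
    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufCreateInfo.size = 64ull * 1024 * 1024;
    bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
    return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
        pBuffer, pAllocation, VMA_NULL);
}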
16185 
16186 VkResult VmaAllocator_T::AllocateDedicatedMemory(
16187  VkDeviceSize size,
16188  VmaSuballocationType suballocType,
16189  uint32_t memTypeIndex,
16190  bool withinBudget,
16191  bool map,
16192  bool isUserDataString,
16193  void* pUserData,
16194  VkBuffer dedicatedBuffer,
16195  VkBufferUsageFlags dedicatedBufferUsage,
16196  VkImage dedicatedImage,
16197  size_t allocationCount,
16198  VmaAllocation* pAllocations)
16199 {
16200  VMA_ASSERT(allocationCount > 0 && pAllocations);
16201 
16202  if(withinBudget)
16203  {
16204  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16205  VmaBudget heapBudget = {};
16206  GetBudget(&heapBudget, heapIndex, 1);
16207  if(heapBudget.usage + size * allocationCount > heapBudget.budget)
16208  {
16209  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16210  }
16211  }
16212 
16213  VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
16214  allocInfo.memoryTypeIndex = memTypeIndex;
16215  allocInfo.allocationSize = size;
16216 
16217 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16218  VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
16219  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16220  {
16221  if(dedicatedBuffer != VK_NULL_HANDLE)
16222  {
16223  VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
16224  dedicatedAllocInfo.buffer = dedicatedBuffer;
16225  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16226  }
16227  else if(dedicatedImage != VK_NULL_HANDLE)
16228  {
16229  dedicatedAllocInfo.image = dedicatedImage;
16230  VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
16231  }
16232  }
16233 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16234 
16235 #if VMA_BUFFER_DEVICE_ADDRESS
16236  VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
16237  if(m_UseKhrBufferDeviceAddress)
16238  {
16239  bool canContainBufferWithDeviceAddress = true;
16240  if(dedicatedBuffer != VK_NULL_HANDLE)
16241  {
16242  canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown
16243  (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
16244  }
16245  else if(dedicatedImage != VK_NULL_HANDLE)
16246  {
16247  canContainBufferWithDeviceAddress = false;
16248  }
16249  if(canContainBufferWithDeviceAddress)
16250  {
16251  allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
16252  VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
16253  }
16254  }
16255 #endif // #if VMA_BUFFER_DEVICE_ADDRESS
16256 
16257  size_t allocIndex;
16258  VkResult res = VK_SUCCESS;
16259  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16260  {
16261  res = AllocateDedicatedMemoryPage(
16262  size,
16263  suballocType,
16264  memTypeIndex,
16265  allocInfo,
16266  map,
16267  isUserDataString,
16268  pUserData,
16269  pAllocations + allocIndex);
16270  if(res != VK_SUCCESS)
16271  {
16272  break;
16273  }
16274  }
16275 
16276  if(res == VK_SUCCESS)
16277  {
16278  // Register them in m_pDedicatedAllocations.
16279  {
16280  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16281  AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
16282  VMA_ASSERT(pDedicatedAllocations);
16283  for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
16284  {
16285  VmaVectorInsertSorted<VmaPointerLess>(*pDedicatedAllocations, pAllocations[allocIndex]);
16286  }
16287  }
16288 
16289  VMA_DEBUG_LOG(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
16290  }
16291  else
16292  {
16293  // Free all already created allocations.
16294  while(allocIndex--)
16295  {
16296  VmaAllocation currAlloc = pAllocations[allocIndex];
16297  VkDeviceMemory hMemory = currAlloc->GetMemory();
16298 
16299  /*
16300  There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
16301  before vkFreeMemory.
16302 
16303  if(currAlloc->GetMappedData() != VMA_NULL)
16304  {
16305  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
16306  }
16307  */
16308 
16309  FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
16310  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
16311  currAlloc->SetUserData(this, VMA_NULL);
16312  m_AllocationObjectAllocator.Free(currAlloc);
16313  }
16314 
16315  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16316  }
16317 
16318  return res;
16319 }
16320 
16321 VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
16322  VkDeviceSize size,
16323  VmaSuballocationType suballocType,
16324  uint32_t memTypeIndex,
16325  const VkMemoryAllocateInfo& allocInfo,
16326  bool map,
16327  bool isUserDataString,
16328  void* pUserData,
16329  VmaAllocation* pAllocation)
16330 {
16331  VkDeviceMemory hMemory = VK_NULL_HANDLE;
16332  VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
16333  if(res < 0)
16334  {
16335  VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
16336  return res;
16337  }
16338 
16339  void* pMappedData = VMA_NULL;
16340  if(map)
16341  {
16342  res = (*m_VulkanFunctions.vkMapMemory)(
16343  m_hDevice,
16344  hMemory,
16345  0,
16346  VK_WHOLE_SIZE,
16347  0,
16348  &pMappedData);
16349  if(res < 0)
16350  {
16351  VMA_DEBUG_LOG(" vkMapMemory FAILED");
16352  FreeVulkanMemory(memTypeIndex, size, hMemory);
16353  return res;
16354  }
16355  }
16356 
16357  *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
16358  (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
16359  (*pAllocation)->SetUserData(this, pUserData);
16360  m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
16361  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16362  {
16363  FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
16364  }
16365 
16366  return VK_SUCCESS;
16367 }
16368 
16369 void VmaAllocator_T::GetBufferMemoryRequirements(
16370  VkBuffer hBuffer,
16371  VkMemoryRequirements& memReq,
16372  bool& requiresDedicatedAllocation,
16373  bool& prefersDedicatedAllocation) const
16374 {
16375 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16376  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16377  {
16378  VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
16379  memReqInfo.buffer = hBuffer;
16380 
16381  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16382 
16383  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16384  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16385 
16386  (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16387 
16388  memReq = memReq2.memoryRequirements;
16389  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16390  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16391  }
16392  else
16393 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16394  {
16395  (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
16396  requiresDedicatedAllocation = false;
16397  prefersDedicatedAllocation = false;
16398  }
16399 }
16400 
16401 void VmaAllocator_T::GetImageMemoryRequirements(
16402  VkImage hImage,
16403  VkMemoryRequirements& memReq,
16404  bool& requiresDedicatedAllocation,
16405  bool& prefersDedicatedAllocation) const
16406 {
16407 #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16408  if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
16409  {
16410  VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
16411  memReqInfo.image = hImage;
16412 
16413  VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
16414 
16415  VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
16416  VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
16417 
16418  (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
16419 
16420  memReq = memReq2.memoryRequirements;
16421  requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
16422  prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
16423  }
16424  else
16425 #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
16426  {
16427  (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
16428  requiresDedicatedAllocation = false;
16429  prefersDedicatedAllocation = false;
16430  }
16431 }
16432 
16433 VkResult VmaAllocator_T::AllocateMemory(
16434  const VkMemoryRequirements& vkMemReq,
16435  bool requiresDedicatedAllocation,
16436  bool prefersDedicatedAllocation,
16437  VkBuffer dedicatedBuffer,
16438  VkBufferUsageFlags dedicatedBufferUsage,
16439  VkImage dedicatedImage,
16440  const VmaAllocationCreateInfo& createInfo,
16441  VmaSuballocationType suballocType,
16442  size_t allocationCount,
16443  VmaAllocation* pAllocations)
16444 {
16445  memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
16446 
16447  VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
16448 
16449  if(vkMemReq.size == 0)
16450  {
16451  return VK_ERROR_VALIDATION_FAILED_EXT;
16452  }
16453  if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
16454  (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16455  {
16456  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
16457  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16458  }
16459  if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16460  (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0)
16461  {
16462  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid.");
16463  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16464  }
16465  if(requiresDedicatedAllocation)
16466  {
16467  if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
16468  {
16469  VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required.");
16470  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16471  }
16472  if(createInfo.pool != VK_NULL_HANDLE)
16473  {
16474  VMA_ASSERT(0 && "Pool specified while dedicated allocation is required.");
16475  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16476  }
16477  }
16478  if((createInfo.pool != VK_NULL_HANDLE) &&
16479  ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0))
16480  {
16481  VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid.");
16482  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16483  }
16484 
16485  if(createInfo.pool != VK_NULL_HANDLE)
16486  {
16487  const VkDeviceSize alignmentForPool = VMA_MAX(
16488  vkMemReq.alignment,
16489  GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
16490 
16491  VmaAllocationCreateInfo createInfoForPool = createInfo;
16492  // If memory type is not HOST_VISIBLE, disable MAPPED.
16493  if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
16494  (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
16495  {
16496  createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
16497  }
16498 
16499  return createInfo.pool->m_BlockVector.Allocate(
16500  m_CurrentFrameIndex.load(),
16501  vkMemReq.size,
16502  alignmentForPool,
16503  createInfoForPool,
16504  suballocType,
16505  allocationCount,
16506  pAllocations);
16507  }
16508  else
16509  {
16510  // Bit mask of Vulkan memory types acceptable for this allocation.
16511  uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
16512  uint32_t memTypeIndex = UINT32_MAX;
16513  VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16514  if(res == VK_SUCCESS)
16515  {
16516  VkDeviceSize alignmentForMemType = VMA_MAX(
16517  vkMemReq.alignment,
16518  GetMemoryTypeMinAlignment(memTypeIndex));
16519 
16520  res = AllocateMemoryOfType(
16521  vkMemReq.size,
16522  alignmentForMemType,
16523  requiresDedicatedAllocation || prefersDedicatedAllocation,
16524  dedicatedBuffer,
16525  dedicatedBufferUsage,
16526  dedicatedImage,
16527  createInfo,
16528  memTypeIndex,
16529  suballocType,
16530  allocationCount,
16531  pAllocations);
16532  // Succeeded on first try.
16533  if(res == VK_SUCCESS)
16534  {
16535  return res;
16536  }
16537  // Allocation from this memory type failed. Try other compatible memory types.
16538  else
16539  {
16540  for(;;)
16541  {
16542  // Remove old memTypeIndex from list of possibilities.
16543  memoryTypeBits &= ~(1u << memTypeIndex);
16544  // Find alternative memTypeIndex.
16545  res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
16546  if(res == VK_SUCCESS)
16547  {
16548  alignmentForMemType = VMA_MAX(
16549  vkMemReq.alignment,
16550  GetMemoryTypeMinAlignment(memTypeIndex));
16551 
16552  res = AllocateMemoryOfType(
16553  vkMemReq.size,
16554  alignmentForMemType,
16555  requiresDedicatedAllocation || prefersDedicatedAllocation,
16556  dedicatedBuffer,
16557  dedicatedBufferUsage,
16558  dedicatedImage,
16559  createInfo,
16560  memTypeIndex,
16561  suballocType,
16562  allocationCount,
16563  pAllocations);
16564  // Allocation from this alternative memory type succeeded.
16565  if(res == VK_SUCCESS)
16566  {
16567  return res;
16568  }
16569  // else: Allocation from this memory type failed. Try next one - next loop iteration.
16570  }
16571  // No other matching memory type index could be found.
16572  else
16573  {
16574  // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
16575  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
16576  }
16577  }
16578  }
16579  }
16580  // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
16581  else
16582  return res;
16583  }
16584 }
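
// Editor's sketch: the same memory-type search AllocateMemory() runs
// internally, performed up front by an application (e.g. before creating a
// custom pool). UINT32_MAX admits every memory type as a candidate.
static VkResult ExampleFindMemoryType(VmaAllocator allocator, uint32_t* pMemTypeIndex)
{
    VmaAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
    return vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, pMemTypeIndex);
}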
16585 
16586 void VmaAllocator_T::FreeMemory(
16587  size_t allocationCount,
16588  const VmaAllocation* pAllocations)
16589 {
16590  VMA_ASSERT(pAllocations);
16591 
16592  for(size_t allocIndex = allocationCount; allocIndex--; )
16593  {
16594  VmaAllocation allocation = pAllocations[allocIndex];
16595 
16596  if(allocation != VK_NULL_HANDLE)
16597  {
16598  if(TouchAllocation(allocation))
16599  {
16600  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
16601  {
16602  FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
16603  }
16604 
16605  switch(allocation->GetType())
16606  {
16607  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
16608  {
16609  VmaBlockVector* pBlockVector = VMA_NULL;
16610  VmaPool hPool = allocation->GetBlock()->GetParentPool();
16611  if(hPool != VK_NULL_HANDLE)
16612  {
16613  pBlockVector = &hPool->m_BlockVector;
16614  }
16615  else
16616  {
16617  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
16618  pBlockVector = m_pBlockVectors[memTypeIndex];
16619  }
16620  pBlockVector->Free(allocation);
16621  }
16622  break;
16623  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
16624  FreeDedicatedMemory(allocation);
16625  break;
16626  default:
16627  VMA_ASSERT(0);
16628  }
16629  }
16630 
16631  // Do this regardless of whether the allocation is lost. Lost allocations still count toward the budget's allocation bytes.
16632  m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
16633  allocation->SetUserData(this, VMA_NULL);
16634  m_AllocationObjectAllocator.Free(allocation);
16635  }
16636  }
16637 }
16638 
16639 VkResult VmaAllocator_T::ResizeAllocation(
16640  const VmaAllocation alloc,
16641  VkDeviceSize newSize)
16642 {
16643  // This function is deprecated and so it does nothing. It's left for backward compatibility.
16644  if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
16645  {
16646  return VK_ERROR_VALIDATION_FAILED_EXT;
16647  }
16648  if(newSize == alloc->GetSize())
16649  {
16650  return VK_SUCCESS;
16651  }
16652  return VK_ERROR_OUT_OF_POOL_MEMORY;
16653 }
16654 
16655 void VmaAllocator_T::CalculateStats(VmaStats* pStats)
16656 {
16657  // Initialize.
16658  InitStatInfo(pStats->total);
16659  for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
16660  InitStatInfo(pStats->memoryType[i]);
16661  for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
16662  InitStatInfo(pStats->memoryHeap[i]);
16663 
16664  // Process default pools.
16665  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16666  {
16667  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
16668  VMA_ASSERT(pBlockVector);
16669  pBlockVector->AddStats(pStats);
16670  }
16671 
16672  // Process custom pools.
16673  {
16674  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
16675  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
16676  {
16677  m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
16678  }
16679  }
16680 
16681  // Process dedicated allocations.
16682  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
16683  {
16684  const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
16685  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
16686  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
16687  VMA_ASSERT(pDedicatedAllocVector);
16688  for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
16689  {
16690  VmaStatInfo allocationStatInfo;
16691  (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
16692  VmaAddStatInfo(pStats->total, allocationStatInfo);
16693  VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
16694  VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
16695  }
16696  }
16697 
16698  // Postprocess.
16699  VmaPostprocessCalcStatInfo(pStats->total);
16700  for(size_t i = 0; i < GetMemoryTypeCount(); ++i)
16701  VmaPostprocessCalcStatInfo(pStats->memoryType[i]);
16702  for(size_t i = 0; i < GetMemoryHeapCount(); ++i)
16703  VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]);
16704 }
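
// Editor's sketch: consuming the aggregate statistics produced above.
// VmaStats is a large structure, so avoid placing it on a small stack.
static void ExamplePrintTotalStats(VmaAllocator allocator)
{
    VmaStats stats;
    vmaCalculateStats(allocator, &stats);
    printf("Used: %llu B in %u allocations, unused: %llu B\n",
        (unsigned long long)stats.total.usedBytes,
        stats.total.allocationCount,
        (unsigned long long)stats.total.unusedBytes);
}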
16705 
16706 void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount)
16707 {
16708 #if VMA_MEMORY_BUDGET
16709  if(m_UseExtMemoryBudget)
16710  {
16711  if(m_Budget.m_OperationsSinceBudgetFetch < 30)
16712  {
16713  VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
16714  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16715  {
16716  const uint32_t heapIndex = firstHeap + i;
16717 
16718  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16719  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16720 
16721  if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
16722  {
16723  outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] +
16724  outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
16725  }
16726  else
16727  {
16728  outBudget->usage = 0;
16729  }
16730 
16731  // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
16732  outBudget->budget = VMA_MIN(
16733  m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
16734  }
16735  }
16736  else
16737  {
16738  UpdateVulkanBudget(); // Outside of mutex lock
16739  GetBudget(outBudget, firstHeap, heapCount); // Recursion
16740  }
16741  }
16742  else
16743 #endif
16744  {
16745  for(uint32_t i = 0; i < heapCount; ++i, ++outBudget)
16746  {
16747  const uint32_t heapIndex = firstHeap + i;
16748 
16749  outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex];
16750  outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
16751 
16752  outBudget->usage = outBudget->blockBytes;
16753  outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
16754  }
16755  }
16756 }
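
// Editor's sketch: querying the per-heap figures filled in by GetBudget().
// vmaGetBudget expects one VmaBudget element per memory heap of the device.
static bool ExampleIsHeapOverBudget(VmaAllocator allocator, uint32_t heapIndex)
{
    VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
    vmaGetBudget(allocator, budgets);
    return budgets[heapIndex].usage >= budgets[heapIndex].budget;
}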
16757 
16758 static const uint32_t VMA_VENDOR_ID_AMD = 4098;
16759 
16760 VkResult VmaAllocator_T::DefragmentationBegin(
16761  const VmaDefragmentationInfo2& info,
16762  VmaDefragmentationStats* pStats,
16763  VmaDefragmentationContext* pContext)
16764 {
16765  if(info.pAllocationsChanged != VMA_NULL)
16766  {
16767  memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32));
16768  }
16769 
16770  *pContext = vma_new(this, VmaDefragmentationContext_T)(
16771  this, m_CurrentFrameIndex.load(), info.flags, pStats);
16772 
16773  (*pContext)->AddPools(info.poolCount, info.pPools);
16774  (*pContext)->AddAllocations(
16775  info.allocationCount, info.pAllocations, info.pAllocationsChanged);
16776 
16777  VkResult res = (*pContext)->Defragment(
16778  info.maxCpuBytesToMove, info.maxCpuAllocationsToMove,
16779  info.maxGpuBytesToMove, info.maxGpuAllocationsToMove,
16780  info.commandBuffer, pStats, info.flags);
16781 
16782  if(res != VK_NOT_READY)
16783  {
16784  vma_delete(this, *pContext);
16785  *pContext = VMA_NULL;
16786  }
16787 
16788  return res;
16789 }
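// An illustrative sketch (not part of the library): how a caller might drive the
// CPU-side defragmentation path above through the public API. `allocator`,
// `allocs`, `allocCount`, and `changed` are assumed to exist in the caller.
/*
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocs;                // VmaAllocation[]
defragInfo.pAllocationsChanged = changed;        // VkBool32[], optional
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VMA_NULL;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
// res may be VK_SUCCESS or VK_NOT_READY (incremental passes still pending).
vmaDefragmentationEnd(allocator, defragCtx);
*/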
16790 
16791 VkResult VmaAllocator_T::DefragmentationEnd(
16792  VmaDefragmentationContext context)
16793 {
16794  vma_delete(this, context);
16795  return VK_SUCCESS;
16796 }
16797 
16798 VkResult VmaAllocator_T::DefragmentationPassBegin(
16799  VmaDefragmentationPassInfo* pInfo,
16800  VmaDefragmentationContext context)
16801 {
16802  return context->DefragmentPassBegin(pInfo);
16803 }
16804 VkResult VmaAllocator_T::DefragmentationPassEnd(
16805  VmaDefragmentationContext context)
16806 {
16807  return context->DefragmentPassEnd();
16808 
16809 }
16810 
16811 void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
16812 {
16813  if(hAllocation->CanBecomeLost())
16814  {
16815  /*
16816  Warning: This is a carefully designed algorithm.
16817  Do not modify unless you really know what you're doing :)
16818  */
16819  const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16820  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16821  for(;;)
16822  {
16823  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16824  {
16825  pAllocationInfo->memoryType = UINT32_MAX;
16826  pAllocationInfo->deviceMemory = VK_NULL_HANDLE;
16827  pAllocationInfo->offset = 0;
16828  pAllocationInfo->size = hAllocation->GetSize();
16829  pAllocationInfo->pMappedData = VMA_NULL;
16830  pAllocationInfo->pUserData = hAllocation->GetUserData();
16831  return;
16832  }
16833  else if(localLastUseFrameIndex == localCurrFrameIndex)
16834  {
16835  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16836  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16837  pAllocationInfo->offset = hAllocation->GetOffset();
16838  pAllocationInfo->size = hAllocation->GetSize();
16839  pAllocationInfo->pMappedData = VMA_NULL;
16840  pAllocationInfo->pUserData = hAllocation->GetUserData();
16841  return;
16842  }
16843  else // Last use time earlier than current time.
16844  {
16845  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16846  {
16847  localLastUseFrameIndex = localCurrFrameIndex;
16848  }
16849  }
16850  }
16851  }
16852  else
16853  {
16854 #if VMA_STATS_STRING_ENABLED
16855  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16856  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16857  for(;;)
16858  {
16859  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16860  if(localLastUseFrameIndex == localCurrFrameIndex)
16861  {
16862  break;
16863  }
16864  else // Last use time earlier than current time.
16865  {
16866  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16867  {
16868  localLastUseFrameIndex = localCurrFrameIndex;
16869  }
16870  }
16871  }
16872 #endif
16873 
16874  pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
16875  pAllocationInfo->deviceMemory = hAllocation->GetMemory();
16876  pAllocationInfo->offset = hAllocation->GetOffset();
16877  pAllocationInfo->size = hAllocation->GetSize();
16878  pAllocationInfo->pMappedData = hAllocation->GetMappedData();
16879  pAllocationInfo->pUserData = hAllocation->GetUserData();
16880  }
16881 }
16882 
16883 bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation)
16884 {
16885  // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo.
16886  if(hAllocation->CanBecomeLost())
16887  {
16888  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16889  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16890  for(;;)
16891  {
16892  if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST)
16893  {
16894  return false;
16895  }
16896  else if(localLastUseFrameIndex == localCurrFrameIndex)
16897  {
16898  return true;
16899  }
16900  else // Last use time earlier than current time.
16901  {
16902  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16903  {
16904  localLastUseFrameIndex = localCurrFrameIndex;
16905  }
16906  }
16907  }
16908  }
16909  else
16910  {
16911 #if VMA_STATS_STRING_ENABLED
16912  uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load();
16913  uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex();
16914  for(;;)
16915  {
16916  VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST);
16917  if(localLastUseFrameIndex == localCurrFrameIndex)
16918  {
16919  break;
16920  }
16921  else // Last use time earlier than current time.
16922  {
16923  if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex))
16924  {
16925  localLastUseFrameIndex = localCurrFrameIndex;
16926  }
16927  }
16928  }
16929 #endif
16930 
16931  return true;
16932  }
16933 }
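// An illustrative sketch (not part of the library): the typical per-frame pattern
// for allocations that can become lost, using the public wrappers of the functions
// above. `allocator`, `frameIndex`, and `alloc` are assumed to exist in the caller.
/*
vmaSetCurrentFrameIndex(allocator, frameIndex);
if(vmaTouchAllocation(allocator, alloc) == VK_FALSE)
{
    // The allocation is lost: destroy the old resource and recreate it.
}
// Otherwise the touch refreshed the allocation's last-use frame index.
*/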
16934 
16935 VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
16936 {
16937  VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
16938 
16939  VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
16940 
16941  if(newCreateInfo.maxBlockCount == 0)
16942  {
16943  newCreateInfo.maxBlockCount = SIZE_MAX;
16944  }
16945  if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
16946  {
16947  return VK_ERROR_INITIALIZATION_FAILED;
16948  }
16949  // Memory type index out of range or forbidden.
16950  if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
16951  ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
16952  {
16953  return VK_ERROR_FEATURE_NOT_PRESENT;
16954  }
16955 
16956  const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
16957 
16958  *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
16959 
16960  VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
16961  if(res != VK_SUCCESS)
16962  {
16963  vma_delete(this, *pPool);
16964  *pPool = VMA_NULL;
16965  return res;
16966  }
16967 
16968  // Add to m_Pools.
16969  {
16970  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16971  (*pPool)->SetId(m_NextPoolId++);
16972  VmaVectorInsertSorted<VmaPointerLess>(m_Pools, *pPool);
16973  }
16974 
16975  return VK_SUCCESS;
16976 }
16977 
16978 void VmaAllocator_T::DestroyPool(VmaPool pool)
16979 {
16980  // Remove from m_Pools.
16981  {
16982  VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
16983  bool success = VmaVectorRemoveSorted<VmaPointerLess>(m_Pools, pool);
16984  VMA_ASSERT(success && "Pool not found in Allocator.");
16985  }
16986 
16987  vma_delete(this, pool);
16988 }
16989 
16990 void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
16991 {
16992  pool->m_BlockVector.GetPoolStats(pPoolStats);
16993 }
16994 
16995 void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
16996 {
16997  m_CurrentFrameIndex.store(frameIndex);
16998 
16999 #if VMA_MEMORY_BUDGET
17000  if(m_UseExtMemoryBudget)
17001  {
17002  UpdateVulkanBudget();
17003  }
17004 #endif // #if VMA_MEMORY_BUDGET
17005 }
17006 
17007 void VmaAllocator_T::MakePoolAllocationsLost(
17008  VmaPool hPool,
17009  size_t* pLostAllocationCount)
17010 {
17011  hPool->m_BlockVector.MakePoolAllocationsLost(
17012  m_CurrentFrameIndex.load(),
17013  pLostAllocationCount);
17014 }
17015 
17016 VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
17017 {
17018  return hPool->m_BlockVector.CheckCorruption();
17019 }
17020 
17021 VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
17022 {
17023  VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
17024 
17025  // Process default pools.
17026  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17027  {
17028  if(((1u << memTypeIndex) & memoryTypeBits) != 0)
17029  {
17030  VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
17031  VMA_ASSERT(pBlockVector);
17032  VkResult localRes = pBlockVector->CheckCorruption();
17033  switch(localRes)
17034  {
17035  case VK_ERROR_FEATURE_NOT_PRESENT:
17036  break;
17037  case VK_SUCCESS:
17038  finalRes = VK_SUCCESS;
17039  break;
17040  default:
17041  return localRes;
17042  }
17043  }
17044  }
17045 
17046  // Process custom pools.
17047  {
17048  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17049  for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
17050  {
17051  if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
17052  {
17053  VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption();
17054  switch(localRes)
17055  {
17056  case VK_ERROR_FEATURE_NOT_PRESENT:
17057  break;
17058  case VK_SUCCESS:
17059  finalRes = VK_SUCCESS;
17060  break;
17061  default:
17062  return localRes;
17063  }
17064  }
17065  }
17066  }
17067 
17068  return finalRes;
17069 }
17070 
17071 void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation)
17072 {
17073  *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false);
17074  (*pAllocation)->InitLost();
17075 }
17076 
17077 VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
17078 {
17079  const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
17080 
17081  // HeapSizeLimit is in effect for this heap.
17082  if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
17083  {
17084  const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
17085  VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
17086  for(;;)
17087  {
17088  const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
17089  if(blockBytesAfterAllocation > heapSize)
17090  {
17091  return VK_ERROR_OUT_OF_DEVICE_MEMORY;
17092  }
17093  if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
17094  {
17095  break;
17096  }
17097  }
17098  }
17099  else
17100  {
17101  m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
17102  }
17103 
17104  // VULKAN CALL vkAllocateMemory.
17105  VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
17106 
17107  if(res == VK_SUCCESS)
17108  {
17109 #if VMA_MEMORY_BUDGET
17110  ++m_Budget.m_OperationsSinceBudgetFetch;
17111 #endif
17112 
17113  // Informative callback.
17114  if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
17115  {
17116  (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
17117  }
17118  }
17119  else
17120  {
17121  m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
17122  }
17123 
17124  return res;
17125 }
17126 
17127 void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
17128 {
17129  // Informative callback.
17130  if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
17131  {
17132  (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
17133  }
17134 
17135  // VULKAN CALL vkFreeMemory.
17136  (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
17137 
17138  m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size;
17139 }
17140 
17141 VkResult VmaAllocator_T::BindVulkanBuffer(
17142  VkDeviceMemory memory,
17143  VkDeviceSize memoryOffset,
17144  VkBuffer buffer,
17145  const void* pNext)
17146 {
17147  if(pNext != VMA_NULL)
17148  {
17149 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17150  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17151  m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
17152  {
17153  VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
17154  bindBufferMemoryInfo.pNext = pNext;
17155  bindBufferMemoryInfo.buffer = buffer;
17156  bindBufferMemoryInfo.memory = memory;
17157  bindBufferMemoryInfo.memoryOffset = memoryOffset;
17158  return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
17159  }
17160  else
17161 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17162  {
17163  return VK_ERROR_EXTENSION_NOT_PRESENT;
17164  }
17165  }
17166  else
17167  {
17168  return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
17169  }
17170 }
17171 
17172 VkResult VmaAllocator_T::BindVulkanImage(
17173  VkDeviceMemory memory,
17174  VkDeviceSize memoryOffset,
17175  VkImage image,
17176  const void* pNext)
17177 {
17178  if(pNext != VMA_NULL)
17179  {
17180 #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17181  if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
17182  m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
17183  {
17184  VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
17185  bindImageMemoryInfo.pNext = pNext;
17186  bindImageMemoryInfo.image = image;
17187  bindImageMemoryInfo.memory = memory;
17188  bindImageMemoryInfo.memoryOffset = memoryOffset;
17189  return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
17190  }
17191  else
17192 #endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
17193  {
17194  return VK_ERROR_EXTENSION_NOT_PRESENT;
17195  }
17196  }
17197  else
17198  {
17199  return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
17200  }
17201 }
17202 
17203 VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
17204 {
17205  if(hAllocation->CanBecomeLost())
17206  {
17207  return VK_ERROR_MEMORY_MAP_FAILED;
17208  }
17209 
17210  switch(hAllocation->GetType())
17211  {
17212  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17213  {
17214  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17215  char *pBytes = VMA_NULL;
17216  VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
17217  if(res == VK_SUCCESS)
17218  {
17219  *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
17220  hAllocation->BlockAllocMap();
17221  }
17222  return res;
17223  }
17224  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17225  return hAllocation->DedicatedAllocMap(this, ppData);
17226  default:
17227  VMA_ASSERT(0);
17228  return VK_ERROR_MEMORY_MAP_FAILED;
17229  }
17230 }
17231 
17232 void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
17233 {
17234  switch(hAllocation->GetType())
17235  {
17236  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17237  {
17238  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17239  hAllocation->BlockAllocUnmap();
17240  pBlock->Unmap(this, 1);
17241  }
17242  break;
17243  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17244  hAllocation->DedicatedAllocUnmap(this);
17245  break;
17246  default:
17247  VMA_ASSERT(0);
17248  }
17249 }
17250 
17251 VkResult VmaAllocator_T::BindBufferMemory(
17252  VmaAllocation hAllocation,
17253  VkDeviceSize allocationLocalOffset,
17254  VkBuffer hBuffer,
17255  const void* pNext)
17256 {
17257  VkResult res = VK_SUCCESS;
17258  switch(hAllocation->GetType())
17259  {
17260  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17261  res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
17262  break;
17263  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17264  {
17265  VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
17266  VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
17267  res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
17268  break;
17269  }
17270  default:
17271  VMA_ASSERT(0);
17272  }
17273  return res;
17274 }
17275 
17276 VkResult VmaAllocator_T::BindImageMemory(
17277  VmaAllocation hAllocation,
17278  VkDeviceSize allocationLocalOffset,
17279  VkImage hImage,
17280  const void* pNext)
17281 {
17282  VkResult res = VK_SUCCESS;
17283  switch(hAllocation->GetType())
17284  {
17285  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17286  res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
17287  break;
17288  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17289  {
17290  VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
17291  VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
17292  res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
17293  break;
17294  }
17295  default:
17296  VMA_ASSERT(0);
17297  }
17298  return res;
17299 }
17300 
17301 VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
17302  VmaAllocation hAllocation,
17303  VkDeviceSize offset, VkDeviceSize size,
17304  VMA_CACHE_OPERATION op)
17305 {
17306  VkResult res = VK_SUCCESS;
17307 
17308  VkMappedMemoryRange memRange = {};
17309  if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
17310  {
17311  switch(op)
17312  {
17313  case VMA_CACHE_FLUSH:
17314  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
17315  break;
17316  case VMA_CACHE_INVALIDATE:
17317  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
17318  break;
17319  default:
17320  VMA_ASSERT(0);
17321  }
17322  }
17323  // else: Just ignore this call.
17324  return res;
17325 }
17326 
17327 VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
17328  uint32_t allocationCount,
17329  const VmaAllocation* allocations,
17330  const VkDeviceSize* offsets, const VkDeviceSize* sizes,
17331  VMA_CACHE_OPERATION op)
17332 {
17333  typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
17334  typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
17335  RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
17336 
17337  for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
17338  {
17339  const VmaAllocation alloc = allocations[allocIndex];
17340  const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
17341  const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
17342  VkMappedMemoryRange newRange;
17343  if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
17344  {
17345  ranges.push_back(newRange);
17346  }
17347  }
17348 
17349  VkResult res = VK_SUCCESS;
17350  if(!ranges.empty())
17351  {
17352  switch(op)
17353  {
17354  case VMA_CACHE_FLUSH:
17355  res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17356  break;
17357  case VMA_CACHE_INVALIDATE:
17358  res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
17359  break;
17360  default:
17361  VMA_ASSERT(0);
17362  }
17363  }
17364  // else: Just ignore this call.
17365  return res;
17366 }
17367 
17368 void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
17369 {
17370  VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
17371 
17372  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17373  {
17374  VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17375  AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
17376  VMA_ASSERT(pDedicatedAllocations);
17377  bool success = VmaVectorRemoveSorted<VmaPointerLess>(*pDedicatedAllocations, allocation);
17378  VMA_ASSERT(success);
17379  }
17380 
17381  VkDeviceMemory hMemory = allocation->GetMemory();
17382 
17383  /*
17384  There is no need to call this, because the Vulkan spec allows vkUnmapMemory to be
17385  skipped before vkFreeMemory.
17386 
17387  if(allocation->GetMappedData() != VMA_NULL)
17388  {
17389  (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
17390  }
17391  */
17392 
17393  FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
17394 
17395  VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
17396 }
17397 
17398 uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
17399 {
17400  VkBufferCreateInfo dummyBufCreateInfo;
17401  VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
17402 
17403  uint32_t memoryTypeBits = 0;
17404 
17405  // Create buffer.
17406  VkBuffer buf = VK_NULL_HANDLE;
17407  VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
17408  m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
17409  if(res == VK_SUCCESS)
17410  {
17411  // Query for supported memory types.
17412  VkMemoryRequirements memReq;
17413  (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
17414  memoryTypeBits = memReq.memoryTypeBits;
17415 
17416  // Destroy buffer.
17417  (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
17418  }
17419 
17420  return memoryTypeBits;
17421 }
17422 
17423 uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
17424 {
17425  // Make sure memory information is already fetched.
17426  VMA_ASSERT(GetMemoryTypeCount() > 0);
17427 
17428  uint32_t memoryTypeBits = UINT32_MAX;
17429 
17430  if(!m_UseAmdDeviceCoherentMemory)
17431  {
17432  // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
17433  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17434  {
17435  if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17436  {
17437  memoryTypeBits &= ~(1u << memTypeIndex);
17438  }
17439  }
17440  }
17441 
17442  return memoryTypeBits;
17443 }
17444 
17445 bool VmaAllocator_T::GetFlushOrInvalidateRange(
17446  VmaAllocation allocation,
17447  VkDeviceSize offset, VkDeviceSize size,
17448  VkMappedMemoryRange& outRange) const
17449 {
17450  const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
17451  if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
17452  {
17453  const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
17454  const VkDeviceSize allocationSize = allocation->GetSize();
17455  VMA_ASSERT(offset <= allocationSize);
17456 
17457  outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
17458  outRange.pNext = VMA_NULL;
17459  outRange.memory = allocation->GetMemory();
17460 
17461  switch(allocation->GetType())
17462  {
17463  case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
17464  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17465  if(size == VK_WHOLE_SIZE)
17466  {
17467  outRange.size = allocationSize - outRange.offset;
17468  }
17469  else
17470  {
17471  VMA_ASSERT(offset + size <= allocationSize);
17472  outRange.size = VMA_MIN(
17473  VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
17474  allocationSize - outRange.offset);
17475  }
17476  break;
17477  case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
17478  {
17479  // 1. Still within this allocation.
17480  outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
17481  if(size == VK_WHOLE_SIZE)
17482  {
17483  size = allocationSize - offset;
17484  }
17485  else
17486  {
17487  VMA_ASSERT(offset + size <= allocationSize);
17488  }
17489  outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
17490 
17491  // 2. Adjust to whole block.
17492  const VkDeviceSize allocationOffset = allocation->GetOffset();
17493  VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
17494  const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
17495  outRange.offset += allocationOffset;
17496  outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
17497 
17498  break;
17499  }
17500  default:
17501  VMA_ASSERT(0);
17502  }
17503  return true;
17504  }
17505  return false;
17506 }
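// A worked example of the alignment above (editorial, assuming nonCoherentAtomSize = 64):
// for a dedicated allocation of size 1024 with offset = 200 and size = 100,
// outRange.offset = VmaAlignDown(200, 64) = 192, and
// outRange.size = VMA_MIN(VmaAlignUp(100 + (200 - 192), 64), 1024 - 192)
//               = VMA_MIN(128, 832) = 128,
// so the flushed/invalidated range is [192, 320), fully covering the requested [200, 300).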
17507 
17508 #if VMA_MEMORY_BUDGET
17509 
17510 void VmaAllocator_T::UpdateVulkanBudget()
17511 {
17512  VMA_ASSERT(m_UseExtMemoryBudget);
17513 
17514  VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
17515 
17516  VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
17517  VmaPnextChainPushFront(&memProps, &budgetProps);
17518 
17519  GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
17520 
17521  {
17522  VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
17523 
17524  for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
17525  {
17526  m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
17527  m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
17528  m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
17529 
17530  // Some buggy drivers return an incorrect budget, e.g. 0 or one much bigger than the heap size.
17531  if(m_Budget.m_VulkanBudget[heapIndex] == 0)
17532  {
17533  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristic.
17534  }
17535  else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
17536  {
17537  m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
17538  }
17539  if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
17540  {
17541  m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
17542  }
17543  }
17544  m_Budget.m_OperationsSinceBudgetFetch = 0;
17545  }
17546 }
17547 
17548 #endif // #if VMA_MEMORY_BUDGET
17549 
17550 void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
17551 {
17552  if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
17553  !hAllocation->CanBecomeLost() &&
17554  (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17555  {
17556  void* pData = VMA_NULL;
17557  VkResult res = Map(hAllocation, &pData);
17558  if(res == VK_SUCCESS)
17559  {
17560  memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
17561  FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
17562  Unmap(hAllocation);
17563  }
17564  else
17565  {
17566  VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
17567  }
17568  }
17569 }
17570 
17571 uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
17572 {
17573  uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
17574  if(memoryTypeBits == UINT32_MAX)
17575  {
17576  memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
17577  m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
17578  }
17579  return memoryTypeBits;
17580 }
17581 
17582 #if VMA_STATS_STRING_ENABLED
17583 
17584 void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
17585 {
17586  bool dedicatedAllocationsStarted = false;
17587  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17588  {
17589  VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
17590  AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
17591  VMA_ASSERT(pDedicatedAllocVector);
17592  if(pDedicatedAllocVector->empty() == false)
17593  {
17594  if(dedicatedAllocationsStarted == false)
17595  {
17596  dedicatedAllocationsStarted = true;
17597  json.WriteString("DedicatedAllocations");
17598  json.BeginObject();
17599  }
17600 
17601  json.BeginString("Type ");
17602  json.ContinueString(memTypeIndex);
17603  json.EndString();
17604 
17605  json.BeginArray();
17606 
17607  for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i)
17608  {
17609  json.BeginObject(true);
17610  const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i];
17611  hAlloc->PrintParameters(json);
17612  json.EndObject();
17613  }
17614 
17615  json.EndArray();
17616  }
17617  }
17618  if(dedicatedAllocationsStarted)
17619  {
17620  json.EndObject();
17621  }
17622 
17623  {
17624  bool allocationsStarted = false;
17625  for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
17626  {
17627  if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false)
17628  {
17629  if(allocationsStarted == false)
17630  {
17631  allocationsStarted = true;
17632  json.WriteString("DefaultPools");
17633  json.BeginObject();
17634  }
17635 
17636  json.BeginString("Type ");
17637  json.ContinueString(memTypeIndex);
17638  json.EndString();
17639 
17640  m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
17641  }
17642  }
17643  if(allocationsStarted)
17644  {
17645  json.EndObject();
17646  }
17647  }
17648 
17649  // Custom pools
17650  {
17651  VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
17652  const size_t poolCount = m_Pools.size();
17653  if(poolCount > 0)
17654  {
17655  json.WriteString("Pools");
17656  json.BeginObject();
17657  for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
17658  {
17659  json.BeginString();
17660  json.ContinueString(m_Pools[poolIndex]->GetId());
17661  json.EndString();
17662 
17663  m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json);
17664  }
17665  json.EndObject();
17666  }
17667  }
17668 }
17669 
17670 #endif // #if VMA_STATS_STRING_ENABLED
17671 
17672 //////////////////////////////////////////////////////////////////////////////
17673 // Public interface
17674 
17675 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
17676  const VmaAllocatorCreateInfo* pCreateInfo,
17677  VmaAllocator* pAllocator)
17678 {
17679  VMA_ASSERT(pCreateInfo && pAllocator);
17680  VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
17681  (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 2));
17682  VMA_DEBUG_LOG("vmaCreateAllocator");
17683  *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
17684  return (*pAllocator)->Init(pCreateInfo);
17685 }
17686 
17687 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
17688  VmaAllocator allocator)
17689 {
17690  if(allocator != VK_NULL_HANDLE)
17691  {
17692  VMA_DEBUG_LOG("vmaDestroyAllocator");
17693  VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks;
17694  vma_delete(&allocationCallbacks, allocator);
17695  }
17696 }
17697 
17698 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
17699 {
17700  VMA_ASSERT(allocator && pAllocatorInfo);
17701  pAllocatorInfo->instance = allocator->m_hInstance;
17702  pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
17703  pAllocatorInfo->device = allocator->m_hDevice;
17704 }
17705 
17706 VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
17707  VmaAllocator allocator,
17708  const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
17709 {
17710  VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
17711  *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
17712 }
17713 
17714 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
17715  VmaAllocator allocator,
17716  const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
17717 {
17718  VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
17719  *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
17720 }
17721 
17722 VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
17723  VmaAllocator allocator,
17724  uint32_t memoryTypeIndex,
17725  VkMemoryPropertyFlags* pFlags)
17726 {
17727  VMA_ASSERT(allocator && pFlags);
17728  VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
17729  *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
17730 }
17731 
17732 VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
17733  VmaAllocator allocator,
17734  uint32_t frameIndex)
17735 {
17736  VMA_ASSERT(allocator);
17737  VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST);
17738 
17739  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17740 
17741  allocator->SetCurrentFrameIndex(frameIndex);
17742 }
17743 
17744 VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
17745  VmaAllocator allocator,
17746  VmaStats* pStats)
17747 {
17748  VMA_ASSERT(allocator && pStats);
17749  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17750  allocator->CalculateStats(pStats);
17751 }
17752 
17753 VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
17754  VmaAllocator allocator,
17755  VmaBudget* pBudget)
17756 {
17757  VMA_ASSERT(allocator && pBudget);
17758  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17759  allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount());
17760 }
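// An illustrative sketch (not part of the library): querying the budget before a
// large allocation. `allocator`, `heapIndex`, and `requestedBytes` are assumed to
// exist in the caller.
/*
VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
vmaGetBudget(allocator, budgets);
if(budgets[heapIndex].usage + requestedBytes > budgets[heapIndex].budget)
{
    // Over budget: prefer freeing or reusing memory instead of allocating more.
}
*/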
17761 
17762 #if VMA_STATS_STRING_ENABLED
17763 
17764 VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
17765  VmaAllocator allocator,
17766  char** ppStatsString,
17767  VkBool32 detailedMap)
17768 {
17769  VMA_ASSERT(allocator && ppStatsString);
17770  VMA_DEBUG_GLOBAL_MUTEX_LOCK
17771 
17772  VmaStringBuilder sb(allocator);
17773  {
17774  VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
17775  json.BeginObject();
17776 
17777  VmaBudget budget[VK_MAX_MEMORY_HEAPS];
17778  allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount());
17779 
17780  VmaStats stats;
17781  allocator->CalculateStats(&stats);
17782 
17783  json.WriteString("Total");
17784  VmaPrintStatInfo(json, stats.total);
17785 
17786  for(uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
17787  {
17788  json.BeginString("Heap ");
17789  json.ContinueString(heapIndex);
17790  json.EndString();
17791  json.BeginObject();
17792 
17793  json.WriteString("Size");
17794  json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size);
17795 
17796  json.WriteString("Flags");
17797  json.BeginArray(true);
17798  if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
17799  {
17800  json.WriteString("DEVICE_LOCAL");
17801  }
17802  json.EndArray();
17803 
17804  json.WriteString("Budget");
17805  json.BeginObject();
17806  {
17807  json.WriteString("BlockBytes");
17808  json.WriteNumber(budget[heapIndex].blockBytes);
17809  json.WriteString("AllocationBytes");
17810  json.WriteNumber(budget[heapIndex].allocationBytes);
17811  json.WriteString("Usage");
17812  json.WriteNumber(budget[heapIndex].usage);
17813  json.WriteString("Budget");
17814  json.WriteNumber(budget[heapIndex].budget);
17815  }
17816  json.EndObject();
17817 
17818  if(stats.memoryHeap[heapIndex].blockCount > 0)
17819  {
17820  json.WriteString("Stats");
17821  VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]);
17822  }
17823 
17824  for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
17825  {
17826  if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
17827  {
17828  json.BeginString("Type ");
17829  json.ContinueString(typeIndex);
17830  json.EndString();
17831 
17832  json.BeginObject();
17833 
17834  json.WriteString("Flags");
17835  json.BeginArray(true);
17836  VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
17837  if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0)
17838  {
17839  json.WriteString("DEVICE_LOCAL");
17840  }
17841  if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
17842  {
17843  json.WriteString("HOST_VISIBLE");
17844  }
17845  if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
17846  {
17847  json.WriteString("HOST_COHERENT");
17848  }
17849  if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0)
17850  {
17851  json.WriteString("HOST_CACHED");
17852  }
17853  if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0)
17854  {
17855  json.WriteString("LAZILY_ALLOCATED");
17856  }
17857  if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)
17858  {
17859  json.WriteString("PROTECTED");
17860  }
17861  if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
17862  {
17863  json.WriteString("DEVICE_COHERENT");
17864  }
17865  if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0)
17866  {
17867  json.WriteString("DEVICE_UNCACHED");
17868  }
17869  json.EndArray();
17870 
17871  if(stats.memoryType[typeIndex].blockCount > 0)
17872  {
17873  json.WriteString("Stats");
17874  VmaPrintStatInfo(json, stats.memoryType[typeIndex]);
17875  }
17876 
17877  json.EndObject();
17878  }
17879  }
17880 
17881  json.EndObject();
17882  }
17883  if(detailedMap == VK_TRUE)
17884  {
17885  allocator->PrintDetailedMap(json);
17886  }
17887 
17888  json.EndObject();
17889  }
17890 
17891  const size_t len = sb.GetLength();
17892  char* const pChars = vma_new_array(allocator, char, len + 1);
17893  if(len > 0)
17894  {
17895  memcpy(pChars, sb.GetData(), len);
17896  }
17897  pChars[len] = '\0';
17898  *ppStatsString = pChars;
17899 }
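// An illustrative sketch (not part of the library): dumping the JSON statistics
// produced above to a file for offline inspection. `allocator` is assumed to exist
// in the caller and <cstdio> to be available.
/*
char* statsString = VMA_NULL;
vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = include detailed map
FILE* file = fopen("vma_stats.json", "w");
if(file != NULL)
{
    fputs(statsString, file);
    fclose(file);
}
vmaFreeStatsString(allocator, statsString);
*/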
17900 
17901 VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
17902  VmaAllocator allocator,
17903  char* pStatsString)
17904 {
17905  if(pStatsString != VMA_NULL)
17906  {
17907  VMA_ASSERT(allocator);
17908  size_t len = strlen(pStatsString);
17909  vma_delete_array(allocator, pStatsString, len + 1);
17910  }
17911 }
17912 
17913 #endif // #if VMA_STATS_STRING_ENABLED
17914 
17915 /*
17916 This function is not protected by any mutex because it just reads immutable data.
17917 */
17918 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
17919  VmaAllocator allocator,
17920  uint32_t memoryTypeBits,
17921  const VmaAllocationCreateInfo* pAllocationCreateInfo,
17922  uint32_t* pMemoryTypeIndex)
17923 {
17924  VMA_ASSERT(allocator != VK_NULL_HANDLE);
17925  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
17926  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
17927 
17928  memoryTypeBits &= allocator->GetGlobalMemoryTypeBits();
17929 
17930  if(pAllocationCreateInfo->memoryTypeBits != 0)
17931  {
17932  memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
17933  }
17934 
17935  uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags;
17936  uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags;
17937  uint32_t notPreferredFlags = 0;
17938 
17939  // Convert usage to requiredFlags and preferredFlags.
17940  switch(pAllocationCreateInfo->usage)
17941  {
17942  case VMA_MEMORY_USAGE_UNKNOWN:
17943  break;
17944  case VMA_MEMORY_USAGE_GPU_ONLY:
17945  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17946  {
17947  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17948  }
17949  break;
17950  case VMA_MEMORY_USAGE_CPU_ONLY:
17951  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
17952  break;
17953  case VMA_MEMORY_USAGE_CPU_TO_GPU:
17954  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17955  if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
17956  {
17957  preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17958  }
17959  break;
17960  case VMA_MEMORY_USAGE_GPU_TO_CPU:
17961  requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
17962  preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
17963  break;
17964  case VMA_MEMORY_USAGE_CPU_COPY:
17965  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
17966  break;
17967  case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
17968  requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
17969  break;
17970  default:
17971  VMA_ASSERT(0);
17972  break;
17973  }
17974 
17975  // Avoid DEVICE_COHERENT unless explicitly requested.
17976  if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) &
17977  (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
17978  {
17979  notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY;
17980  }
17981 
17982  *pMemoryTypeIndex = UINT32_MAX;
17983  uint32_t minCost = UINT32_MAX;
17984  for(uint32_t memTypeIndex = 0, memTypeBit = 1;
17985  memTypeIndex < allocator->GetMemoryTypeCount();
17986  ++memTypeIndex, memTypeBit <<= 1)
17987  {
17988  // This memory type is acceptable according to memoryTypeBits bitmask.
17989  if((memTypeBit & memoryTypeBits) != 0)
17990  {
17991  const VkMemoryPropertyFlags currFlags =
17992  allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
17993  // This memory type contains requiredFlags.
17994  if((requiredFlags & ~currFlags) == 0)
17995  {
17996  // Calculate cost as number of bits from preferredFlags not present in this memory type.
17997  uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) +
17998  VmaCountBitsSet(currFlags & notPreferredFlags);
17999  // Remember memory type with lowest cost.
18000  if(currCost < minCost)
18001  {
18002  *pMemoryTypeIndex = memTypeIndex;
18003  if(currCost == 0)
18004  {
18005  return VK_SUCCESS;
18006  }
18007  minCost = currCost;
18008  }
18009  }
18010  }
18011  }
18012  return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
18013 }
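// An illustrative sketch (not part of the library): finding a memory type for a
// host-visible staging allocation. `allocator` is assumed; passing UINT32_MAX as
// memoryTypeBits means "any memory type is acceptable".
/*
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;

uint32_t memTypeIndex = UINT32_MAX;
VkResult res = vmaFindMemoryTypeIndex(allocator, UINT32_MAX, &allocCreateInfo, &memTypeIndex);
// On VK_SUCCESS, memTypeIndex holds the lowest-cost matching memory type.
*/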
18014 
18015 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
18016  VmaAllocator allocator,
18017  const VkBufferCreateInfo* pBufferCreateInfo,
18018  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18019  uint32_t* pMemoryTypeIndex)
18020 {
18021  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18022  VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
18023  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18024  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18025 
18026  const VkDevice hDev = allocator->m_hDevice;
18027  VkBuffer hBuffer = VK_NULL_HANDLE;
18028  VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer(
18029  hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
18030  if(res == VK_SUCCESS)
18031  {
18032  VkMemoryRequirements memReq = {};
18033  allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements(
18034  hDev, hBuffer, &memReq);
18035 
18036  res = vmaFindMemoryTypeIndex(
18037  allocator,
18038  memReq.memoryTypeBits,
18039  pAllocationCreateInfo,
18040  pMemoryTypeIndex);
18041 
18042  allocator->GetVulkanFunctions().vkDestroyBuffer(
18043  hDev, hBuffer, allocator->GetAllocationCallbacks());
18044  }
18045  return res;
18046 }
18047 
18048 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
18049  VmaAllocator allocator,
18050  const VkImageCreateInfo* pImageCreateInfo,
18051  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18052  uint32_t* pMemoryTypeIndex)
18053 {
18054  VMA_ASSERT(allocator != VK_NULL_HANDLE);
18055  VMA_ASSERT(pImageCreateInfo != VMA_NULL);
18056  VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
18057  VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
18058 
18059  const VkDevice hDev = allocator->m_hDevice;
18060  VkImage hImage = VK_NULL_HANDLE;
18061  VkResult res = allocator->GetVulkanFunctions().vkCreateImage(
18062  hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
18063  if(res == VK_SUCCESS)
18064  {
18065  VkMemoryRequirements memReq = {};
18066  allocator->GetVulkanFunctions().vkGetImageMemoryRequirements(
18067  hDev, hImage, &memReq);
18068 
18069  res = vmaFindMemoryTypeIndex(
18070  allocator,
18071  memReq.memoryTypeBits,
18072  pAllocationCreateInfo,
18073  pMemoryTypeIndex);
18074 
18075  allocator->GetVulkanFunctions().vkDestroyImage(
18076  hDev, hImage, allocator->GetAllocationCallbacks());
18077  }
18078  return res;
18079 }
18080 
18081 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
18082  VmaAllocator allocator,
18083  const VmaPoolCreateInfo* pCreateInfo,
18084  VmaPool* pPool)
18085 {
18086  VMA_ASSERT(allocator && pCreateInfo && pPool);
18087 
18088  VMA_DEBUG_LOG("vmaCreatePool");
18089 
18090  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18091 
18092  VkResult res = allocator->CreatePool(pCreateInfo, pPool);
18093 
18094 #if VMA_RECORDING_ENABLED
18095  if(allocator->GetRecorder() != VMA_NULL)
18096  {
18097  allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool);
18098  }
18099 #endif
18100 
18101  return res;
18102 }
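// An illustrative sketch (not part of the library): creating a custom pool for a
// memory type found earlier. `allocator` and `memTypeIndex` are assumed to exist
// in the caller.
/*
VmaPoolCreateInfo poolCreateInfo = {};
poolCreateInfo.memoryTypeIndex = memTypeIndex;
poolCreateInfo.blockSize = 64ull * 1024 * 1024; // fixed 64 MiB blocks
poolCreateInfo.maxBlockCount = 4;               // cap the pool at 256 MiB

VmaPool pool = VMA_NULL;
VkResult res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
// ... allocate with VmaAllocationCreateInfo::pool = pool ...
vmaDestroyPool(allocator, pool);
*/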
18103 
18104 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
18105  VmaAllocator allocator,
18106  VmaPool pool)
18107 {
18108  VMA_ASSERT(allocator);
18109 
18110  if(pool == VK_NULL_HANDLE)
18111  {
18112  return;
18113  }
18114 
18115  VMA_DEBUG_LOG("vmaDestroyPool");
18116 
18117  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18118 
18119 #if VMA_RECORDING_ENABLED
18120  if(allocator->GetRecorder() != VMA_NULL)
18121  {
18122  allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool);
18123  }
18124 #endif
18125 
18126  allocator->DestroyPool(pool);
18127 }
18128 
18129 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats(
18130  VmaAllocator allocator,
18131  VmaPool pool,
18132  VmaPoolStats* pPoolStats)
18133 {
18134  VMA_ASSERT(allocator && pool && pPoolStats);
18135 
18136  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18137 
18138  allocator->GetPoolStats(pool, pPoolStats);
18139 }
18140 
18141 VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost(
18142  VmaAllocator allocator,
18143  VmaPool pool,
18144  size_t* pLostAllocationCount)
18145 {
18146  VMA_ASSERT(allocator && pool);
18147 
18148  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18149 
18150 #if VMA_RECORDING_ENABLED
18151  if(allocator->GetRecorder() != VMA_NULL)
18152  {
18153  allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool);
18154  }
18155 #endif
18156 
18157  allocator->MakePoolAllocationsLost(pool, pLostAllocationCount);
18158 }
18159 
18160 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
18161 {
18162  VMA_ASSERT(allocator && pool);
18163 
18164  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18165 
18166  VMA_DEBUG_LOG("vmaCheckPoolCorruption");
18167 
18168  return allocator->CheckPoolCorruption(pool);
18169 }
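// An illustrative sketch (not part of the library): periodic corruption checking of
// a custom pool. Meaningful only when VMA_DEBUG_DETECT_CORRUPTION and a nonzero
// VMA_DEBUG_MARGIN are enabled; `allocator` and `pool` are assumed.
/*
VkResult res = vmaCheckPoolCorruption(allocator, pool);
// VK_SUCCESS: margins validated; VK_ERROR_FEATURE_NOT_PRESENT: validation is not
// possible for this pool's memory type; any other error: corruption was detected.
*/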
18170 
18171 VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
18172  VmaAllocator allocator,
18173  VmaPool pool,
18174  const char** ppName)
18175 {
18176  VMA_ASSERT(allocator && pool && ppName);
18177 
18178  VMA_DEBUG_LOG("vmaGetPoolName");
18179 
18180  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18181 
18182  *ppName = pool->GetName();
18183 }
18184 
18185 VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
18186  VmaAllocator allocator,
18187  VmaPool pool,
18188  const char* pName)
18189 {
18190  VMA_ASSERT(allocator && pool);
18191 
18192  VMA_DEBUG_LOG("vmaSetPoolName");
18193 
18194  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18195 
18196  pool->SetName(pName);
18197 
18198 #if VMA_RECORDING_ENABLED
18199  if(allocator->GetRecorder() != VMA_NULL)
18200  {
18201  allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName);
18202  }
18203 #endif
18204 }
18205 
18206 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
18207  VmaAllocator allocator,
18208  const VkMemoryRequirements* pVkMemoryRequirements,
18209  const VmaAllocationCreateInfo* pCreateInfo,
18210  VmaAllocation* pAllocation,
18211  VmaAllocationInfo* pAllocationInfo)
18212 {
18213  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
18214 
18215  VMA_DEBUG_LOG("vmaAllocateMemory");
18216 
18217  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18218 
18219  VkResult result = allocator->AllocateMemory(
18220  *pVkMemoryRequirements,
18221  false, // requiresDedicatedAllocation
18222  false, // prefersDedicatedAllocation
18223  VK_NULL_HANDLE, // dedicatedBuffer
18224  UINT32_MAX, // dedicatedBufferUsage
18225  VK_NULL_HANDLE, // dedicatedImage
18226  *pCreateInfo,
18227  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18228  1, // allocationCount
18229  pAllocation);
18230 
18231 #if VMA_RECORDING_ENABLED
18232  if(allocator->GetRecorder() != VMA_NULL)
18233  {
18234  allocator->GetRecorder()->RecordAllocateMemory(
18235  allocator->GetCurrentFrameIndex(),
18236  *pVkMemoryRequirements,
18237  *pCreateInfo,
18238  *pAllocation);
18239  }
18240 #endif
18241 
18242  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18243  {
18244  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18245  }
18246 
18247  return result;
18248 }
18249 
18250 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
18251  VmaAllocator allocator,
18252  const VkMemoryRequirements* pVkMemoryRequirements,
18253  const VmaAllocationCreateInfo* pCreateInfo,
18254  size_t allocationCount,
18255  VmaAllocation* pAllocations,
18256  VmaAllocationInfo* pAllocationInfo)
18257 {
18258  if(allocationCount == 0)
18259  {
18260  return VK_SUCCESS;
18261  }
18262 
18263  VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
18264 
18265  VMA_DEBUG_LOG("vmaAllocateMemoryPages");
18266 
18267  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18268 
18269  VkResult result = allocator->AllocateMemory(
18270  *pVkMemoryRequirements,
18271  false, // requiresDedicatedAllocation
18272  false, // prefersDedicatedAllocation
18273  VK_NULL_HANDLE, // dedicatedBuffer
18274  UINT32_MAX, // dedicatedBufferUsage
18275  VK_NULL_HANDLE, // dedicatedImage
18276  *pCreateInfo,
18277  VMA_SUBALLOCATION_TYPE_UNKNOWN,
18278  allocationCount,
18279  pAllocations);
18280 
18281 #if VMA_RECORDING_ENABLED
18282  if(allocator->GetRecorder() != VMA_NULL)
18283  {
18284  allocator->GetRecorder()->RecordAllocateMemoryPages(
18285  allocator->GetCurrentFrameIndex(),
18286  *pVkMemoryRequirements,
18287  *pCreateInfo,
18288  (uint64_t)allocationCount,
18289  pAllocations);
18290  }
18291 #endif
18292 
18293  if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
18294  {
18295  for(size_t i = 0; i < allocationCount; ++i)
18296  {
18297  allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
18298  }
18299  }
18300 
18301  return result;
18302 }
18303 
18304 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
18305  VmaAllocator allocator,
18306  VkBuffer buffer,
18307  const VmaAllocationCreateInfo* pCreateInfo,
18308  VmaAllocation* pAllocation,
18309  VmaAllocationInfo* pAllocationInfo)
18310 {
18311  VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18312 
18313  VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
18314 
18315  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18316 
18317  VkMemoryRequirements vkMemReq = {};
18318  bool requiresDedicatedAllocation = false;
18319  bool prefersDedicatedAllocation = false;
18320  allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
18321  requiresDedicatedAllocation,
18322  prefersDedicatedAllocation);
18323 
18324  VkResult result = allocator->AllocateMemory(
18325  vkMemReq,
18326  requiresDedicatedAllocation,
18327  prefersDedicatedAllocation,
18328  buffer, // dedicatedBuffer
18329  UINT32_MAX, // dedicatedBufferUsage
18330  VK_NULL_HANDLE, // dedicatedImage
18331  *pCreateInfo,
18332  VMA_SUBALLOCATION_TYPE_BUFFER,
18333  1, // allocationCount
18334  pAllocation);
18335 
18336 #if VMA_RECORDING_ENABLED
18337  if(allocator->GetRecorder() != VMA_NULL)
18338  {
18339  allocator->GetRecorder()->RecordAllocateMemoryForBuffer(
18340  allocator->GetCurrentFrameIndex(),
18341  vkMemReq,
18342  requiresDedicatedAllocation,
18343  prefersDedicatedAllocation,
18344  *pCreateInfo,
18345  *pAllocation);
18346  }
18347 #endif
18348 
18349  if(pAllocationInfo && result == VK_SUCCESS)
18350  {
18351  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18352  }
18353 
18354  return result;
18355 }
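// An illustrative sketch (not part of the library): the split create/allocate/bind
// flow this function supports, as an alternative to vmaCreateBuffer. `allocator`,
// `device`, and a filled `bufCreateInfo` are assumed to exist in the caller.
/*
VkBuffer buf = VK_NULL_HANDLE;
VkResult res = vkCreateBuffer(device, &bufCreateInfo, VMA_NULL, &buf);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VmaAllocation alloc = VMA_NULL;
if(res == VK_SUCCESS)
    res = vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, VMA_NULL);
if(res == VK_SUCCESS)
    res = vmaBindBufferMemory(allocator, alloc, buf);
*/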
18356 
18357 VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
18358  VmaAllocator allocator,
18359  VkImage image,
18360  const VmaAllocationCreateInfo* pCreateInfo,
18361  VmaAllocation* pAllocation,
18362  VmaAllocationInfo* pAllocationInfo)
18363 {
18364  VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
18365 
18366  VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
18367 
18368  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18369 
18370  VkMemoryRequirements vkMemReq = {};
18371  bool requiresDedicatedAllocation = false;
18372  bool prefersDedicatedAllocation = false;
18373  allocator->GetImageMemoryRequirements(image, vkMemReq,
18374  requiresDedicatedAllocation, prefersDedicatedAllocation);
18375 
18376  VkResult result = allocator->AllocateMemory(
18377  vkMemReq,
18378  requiresDedicatedAllocation,
18379  prefersDedicatedAllocation,
18380  VK_NULL_HANDLE, // dedicatedBuffer
18381  UINT32_MAX, // dedicatedBufferUsage
18382  image, // dedicatedImage
18383  *pCreateInfo,
18384  VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
18385  1, // allocationCount
18386  pAllocation);
18387 
18388 #if VMA_RECORDING_ENABLED
18389  if(allocator->GetRecorder() != VMA_NULL)
18390  {
18391  allocator->GetRecorder()->RecordAllocateMemoryForImage(
18392  allocator->GetCurrentFrameIndex(),
18393  vkMemReq,
18394  requiresDedicatedAllocation,
18395  prefersDedicatedAllocation,
18396  *pCreateInfo,
18397  *pAllocation);
18398  }
18399 #endif
18400 
18401  if(pAllocationInfo && result == VK_SUCCESS)
18402  {
18403  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
18404  }
18405 
18406  return result;
18407 }
18408 
18409 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
18410  VmaAllocator allocator,
18411  VmaAllocation allocation)
18412 {
18413  VMA_ASSERT(allocator);
18414 
18415  if(allocation == VK_NULL_HANDLE)
18416  {
18417  return;
18418  }
18419 
18420  VMA_DEBUG_LOG("vmaFreeMemory");
18421 
18422  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18423 
18424 #if VMA_RECORDING_ENABLED
18425  if(allocator->GetRecorder() != VMA_NULL)
18426  {
18427  allocator->GetRecorder()->RecordFreeMemory(
18428  allocator->GetCurrentFrameIndex(),
18429  allocation);
18430  }
18431 #endif
18432 
18433  allocator->FreeMemory(
18434  1, // allocationCount
18435  &allocation);
18436 }
18437 
18438 VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
18439  VmaAllocator allocator,
18440  size_t allocationCount,
18441  const VmaAllocation* pAllocations)
18442 {
18443  if(allocationCount == 0)
18444  {
18445  return;
18446  }
18447 
18448  VMA_ASSERT(allocator);
18449 
18450  VMA_DEBUG_LOG("vmaFreeMemoryPages");
18451 
18452  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18453 
18454 #if VMA_RECORDING_ENABLED
18455  if(allocator->GetRecorder() != VMA_NULL)
18456  {
18457  allocator->GetRecorder()->RecordFreeMemoryPages(
18458  allocator->GetCurrentFrameIndex(),
18459  (uint64_t)allocationCount,
18460  pAllocations);
18461  }
18462 #endif
18463 
18464  allocator->FreeMemory(allocationCount, pAllocations);
18465 }
18466 
18467 VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
18468  VmaAllocator allocator,
18469  VmaAllocation allocation,
18470  VkDeviceSize newSize)
18471 {
18472  VMA_ASSERT(allocator && allocation);
18473 
18474  VMA_DEBUG_LOG("vmaResizeAllocation");
18475 
18476  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18477 
18478  return allocator->ResizeAllocation(allocation, newSize);
18479 }
18480 
18481 VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
18482  VmaAllocator allocator,
18483  VmaAllocation allocation,
18484  VmaAllocationInfo* pAllocationInfo)
18485 {
18486  VMA_ASSERT(allocator && allocation && pAllocationInfo);
18487 
18488  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18489 
18490 #if VMA_RECORDING_ENABLED
18491  if(allocator->GetRecorder() != VMA_NULL)
18492  {
18493  allocator->GetRecorder()->RecordGetAllocationInfo(
18494  allocator->GetCurrentFrameIndex(),
18495  allocation);
18496  }
18497 #endif
18498 
18499  allocator->GetAllocationInfo(allocation, pAllocationInfo);
18500 }
18501 
18502 VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
18503  VmaAllocator allocator,
18504  VmaAllocation allocation)
18505 {
18506  VMA_ASSERT(allocator && allocation);
18507 
18508  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18509 
18510 #if VMA_RECORDING_ENABLED
18511  if(allocator->GetRecorder() != VMA_NULL)
18512  {
18513  allocator->GetRecorder()->RecordTouchAllocation(
18514  allocator->GetCurrentFrameIndex(),
18515  allocation);
18516  }
18517 #endif
18518 
18519  return allocator->TouchAllocation(allocation);
18520 }
18521 
18522 VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
18523  VmaAllocator allocator,
18524  VmaAllocation allocation,
18525  void* pUserData)
18526 {
18527  VMA_ASSERT(allocator && allocation);
18528 
18529  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18530 
18531  allocation->SetUserData(allocator, pUserData);
18532 
18533 #if VMA_RECORDING_ENABLED
18534  if(allocator->GetRecorder() != VMA_NULL)
18535  {
18536  allocator->GetRecorder()->RecordSetAllocationUserData(
18537  allocator->GetCurrentFrameIndex(),
18538  allocation,
18539  pUserData);
18540  }
18541 #endif
18542 }
18543 
18544 VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
18545  VmaAllocator allocator,
18546  VmaAllocation* pAllocation)
18547 {
18548  VMA_ASSERT(allocator && pAllocation);
18549 
18550  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18551 
18552  allocator->CreateLostAllocation(pAllocation);
18553 
18554 #if VMA_RECORDING_ENABLED
18555  if(allocator->GetRecorder() != VMA_NULL)
18556  {
18557  allocator->GetRecorder()->RecordCreateLostAllocation(
18558  allocator->GetCurrentFrameIndex(),
18559  *pAllocation);
18560  }
18561 #endif
18562 }
18563 
18564 VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
18565  VmaAllocator allocator,
18566  VmaAllocation allocation,
18567  void** ppData)
18568 {
18569  VMA_ASSERT(allocator && allocation && ppData);
18570 
18571  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18572 
18573  VkResult res = allocator->Map(allocation, ppData);
18574 
18575 #if VMA_RECORDING_ENABLED
18576  if(allocator->GetRecorder() != VMA_NULL)
18577  {
18578  allocator->GetRecorder()->RecordMapMemory(
18579  allocator->GetCurrentFrameIndex(),
18580  allocation);
18581  }
18582 #endif
18583 
18584  return res;
18585 }
18586 
18587 VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
18588  VmaAllocator allocator,
18589  VmaAllocation allocation)
18590 {
18591  VMA_ASSERT(allocator && allocation);
18592 
18593  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18594 
18595 #if VMA_RECORDING_ENABLED
18596  if(allocator->GetRecorder() != VMA_NULL)
18597  {
18598  allocator->GetRecorder()->RecordUnmapMemory(
18599  allocator->GetCurrentFrameIndex(),
18600  allocation);
18601  }
18602 #endif
18603 
18604  allocator->Unmap(allocation);
18605 }
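/*
A minimal map/write/unmap sketch, assuming `allocation` lives in a
HOST_VISIBLE memory type (e.g. created with VMA_MEMORY_USAGE_CPU_TO_GPU) and
`srcData`/`srcSize` exist. Mapping is reference-counted internally, so nested
vmaMapMemory()/vmaUnmapMemory() pairs on the same allocation are valid:

\code
void* mappedData = VMA_NULL;
if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
{
    memcpy(mappedData, srcData, (size_t)srcSize);
    vmaUnmapMemory(allocator, allocation);
    // For non-HOST_COHERENT memory types, also flush the written range -
    // see vmaFlushAllocation() below.
}
\endcode
*/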
18606 
18607 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18608 {
18609  VMA_ASSERT(allocator && allocation);
18610 
18611  VMA_DEBUG_LOG("vmaFlushAllocation");
18612 
18613  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18614 
18615  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
18616 
18617 #if VMA_RECORDING_ENABLED
18618  if(allocator->GetRecorder() != VMA_NULL)
18619  {
18620  allocator->GetRecorder()->RecordFlushAllocation(
18621  allocator->GetCurrentFrameIndex(),
18622  allocation, offset, size);
18623  }
18624 #endif
18625 
18626  return res;
18627 }
18628 
18629 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size)
18630 {
18631  VMA_ASSERT(allocator && allocation);
18632 
18633  VMA_DEBUG_LOG("vmaInvalidateAllocation");
18634 
18635  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18636 
18637  const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
18638 
18639 #if VMA_RECORDING_ENABLED
18640  if(allocator->GetRecorder() != VMA_NULL)
18641  {
18642  allocator->GetRecorder()->RecordInvalidateAllocation(
18643  allocator->GetCurrentFrameIndex(),
18644  allocation, offset, size);
18645  }
18646 #endif
18647 
18648  return res;
18649 }
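/*
On memory types without VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, host writes
must be flushed before the device reads them, and host caches must be
invalidated before the host reads data written by the device. A readback
sketch, assuming a mapped VMA_MEMORY_USAGE_GPU_TO_CPU `allocation`:

\code
// After the GPU has finished writing (e.g. the fence is signaled):
vmaInvalidateAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
// ...now the mapped pointer can be read safely on the CPU...
\endcode

Both calls are cheap to issue unconditionally: on HOST_COHERENT memory the
underlying vkFlush/vkInvalidate call is skipped.
*/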
18650 
18651 VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
18652  VmaAllocator allocator,
18653  uint32_t allocationCount,
18654  const VmaAllocation* allocations,
18655  const VkDeviceSize* offsets,
18656  const VkDeviceSize* sizes)
18657 {
18658  VMA_ASSERT(allocator);
18659 
18660  if(allocationCount == 0)
18661  {
18662  return VK_SUCCESS;
18663  }
18664 
18665  VMA_ASSERT(allocations);
18666 
18667  VMA_DEBUG_LOG("vmaFlushAllocations");
18668 
18669  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18670 
18671  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
18672 
18673 #if VMA_RECORDING_ENABLED
18674  if(allocator->GetRecorder() != VMA_NULL)
18675  {
18676  // TODO: Recording of vmaFlushAllocations is not implemented yet.
18677  }
18678 #endif
18679 
18680  return res;
18681 }
18682 
18683 VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
18684  VmaAllocator allocator,
18685  uint32_t allocationCount,
18686  const VmaAllocation* allocations,
18687  const VkDeviceSize* offsets,
18688  const VkDeviceSize* sizes)
18689 {
18690  VMA_ASSERT(allocator);
18691 
18692  if(allocationCount == 0)
18693  {
18694  return VK_SUCCESS;
18695  }
18696 
18697  VMA_ASSERT(allocations);
18698 
18699  VMA_DEBUG_LOG("vmaInvalidateAllocations");
18700 
18701  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18702 
18703  const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
18704 
18705 #if VMA_RECORDING_ENABLED
18706  if(allocator->GetRecorder() != VMA_NULL)
18707  {
18708  // TODO: Recording of vmaInvalidateAllocations is not implemented yet.
18709  }
18710 #endif
18711 
18712  return res;
18713 }
18714 
18715 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits)
18716 {
18717  VMA_ASSERT(allocator);
18718 
18719  VMA_DEBUG_LOG("vmaCheckCorruption");
18720 
18721  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18722 
18723  return allocator->CheckCorruption(memoryTypeBits);
18724 }
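/*
Corruption checking only has an effect when the implementation is compiled
with margins enabled; a sketch of the required configuration:

\code
#define VMA_DEBUG_MARGIN 16
#define VMA_DEBUG_DETECT_CORRUPTION 1
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"

// Later, periodically:
VkResult res = vmaCheckCorruption(allocator, UINT32_MAX); // all memory types
// VK_SUCCESS: margins validated; VK_ERROR_FEATURE_NOT_PRESENT: corruption
// detection is not enabled for any of the requested memory types.
\endcode
*/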
18725 
18726 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
18727  VmaAllocator allocator,
18728  const VmaAllocation* pAllocations,
18729  size_t allocationCount,
18730  VkBool32* pAllocationsChanged,
18731  const VmaDefragmentationInfo *pDefragmentationInfo,
18732  VmaDefragmentationStats* pDefragmentationStats)
18733 {
18734  // Deprecated interface, reimplemented on top of the new one.
18735 
18736  VmaDefragmentationInfo2 info2 = {};
18737  info2.allocationCount = (uint32_t)allocationCount;
18738  info2.pAllocations = pAllocations;
18739  info2.pAllocationsChanged = pAllocationsChanged;
18740  if(pDefragmentationInfo != VMA_NULL)
18741  {
18742  info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove;
18743  info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove;
18744  }
18745  else
18746  {
18747  info2.maxCpuAllocationsToMove = UINT32_MAX;
18748  info2.maxCpuBytesToMove = VK_WHOLE_SIZE;
18749  }
18750  // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero.
18751 
18752  VmaDefragmentationContext ctx;
18753  VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx);
18754  if(res == VK_NOT_READY)
18755  {
18756  res = vmaDefragmentationEnd(allocator, ctx);
18757  }
18758  return res;
18759 }
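/*
New code can call the Begin/End pair directly instead of the wrapper above.
A CPU-side-only sketch mirroring the wrapper's defaults, where `allocs` and
`allocCount` are assumed caller-side variables:

\code
VmaDefragmentationInfo2 defragInfo = {};
defragInfo.allocationCount = (uint32_t)allocCount;
defragInfo.pAllocations = allocs;
defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
defragInfo.maxCpuAllocationsToMove = UINT32_MAX;

VmaDefragmentationContext defragCtx = VK_NULL_HANDLE;
VkResult res = vmaDefragmentationBegin(allocator, &defragInfo, VMA_NULL, &defragCtx);
if(res == VK_NOT_READY)
{
    res = vmaDefragmentationEnd(allocator, defragCtx);
}
// Buffers/images bound to allocations that moved must then be re-created and
// re-bound, because their backing memory has changed.
\endcode
*/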
18760 
18761 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
18762  VmaAllocator allocator,
18763  const VmaDefragmentationInfo2* pInfo,
18764  VmaDefragmentationStats* pStats,
18765  VmaDefragmentationContext *pContext)
18766 {
18767  VMA_ASSERT(allocator && pInfo && pContext);
18768 
18769  // Degenerate case: Nothing to defragment.
18770  if(pInfo->allocationCount == 0 && pInfo->poolCount == 0)
18771  {
18772  return VK_SUCCESS;
18773  }
18774 
18775  VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL);
18776  VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL);
18777  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations));
18778  VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools));
18779 
18780  VMA_DEBUG_LOG("vmaDefragmentationBegin");
18781 
18782  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18783 
18784  VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext);
18785 
18786 #if VMA_RECORDING_ENABLED
18787  if(allocator->GetRecorder() != VMA_NULL)
18788  {
18789  allocator->GetRecorder()->RecordDefragmentationBegin(
18790  allocator->GetCurrentFrameIndex(), *pInfo, *pContext);
18791  }
18792 #endif
18793 
18794  return res;
18795 }
18796 
18797 VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
18798  VmaAllocator allocator,
18799  VmaDefragmentationContext context)
18800 {
18801  VMA_ASSERT(allocator);
18802 
18803  VMA_DEBUG_LOG("vmaDefragmentationEnd");
18804 
18805  if(context != VK_NULL_HANDLE)
18806  {
18807  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18808 
18809 #if VMA_RECORDING_ENABLED
18810  if(allocator->GetRecorder() != VMA_NULL)
18811  {
18812  allocator->GetRecorder()->RecordDefragmentationEnd(
18813  allocator->GetCurrentFrameIndex(), context);
18814  }
18815 #endif
18816 
18817  return allocator->DefragmentationEnd(context);
18818  }
18819  else
18820  {
18821  return VK_SUCCESS;
18822  }
18823 }
18824 
18825 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
18826  VmaAllocator allocator,
18827  VmaDefragmentationContext context,
18828  VmaDefragmentationPassInfo* pInfo
18829  )
18830 {
18831  VMA_ASSERT(allocator);
18832  VMA_ASSERT(pInfo);
18833 
18834  VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
18835 
18836  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18837 
18838  if(context == VK_NULL_HANDLE)
18839  {
18840  pInfo->moveCount = 0;
18841  return VK_SUCCESS;
18842  }
18843 
18844  return allocator->DefragmentationPassBegin(pInfo, context);
18845 }
18846 VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
18847  VmaAllocator allocator,
18848  VmaDefragmentationContext context)
18849 {
18850  VMA_ASSERT(allocator);
18851 
18852  VMA_DEBUG_LOG("vmaEndDefragmentationPass");
18853  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18854 
18855  if(context == VK_NULL_HANDLE)
18856  return VK_SUCCESS;
18857 
18858  return allocator->DefragmentationPassEnd(context);
18859 }
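/*
A rough sketch of the incremental flow these two entry points enable,
assuming `defragCtx` was begun with VMA_DEFRAGMENTATION_FLAG_INCREMENTAL set
in VmaDefragmentationInfo2::flags; applying the moves reported through
VmaDefragmentationPassInfo::pMoves is elided:

\code
for(;;)
{
    VmaDefragmentationPassInfo passInfo = {};
    vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
    if(passInfo.moveCount == 0)
    {
        break; // No moves reported - nothing left to defragment.
    }
    // Re-create/re-bind the resources listed in passInfo.pMoves, then:
    vmaEndDefragmentationPass(allocator, defragCtx);
}
vmaDefragmentationEnd(allocator, defragCtx);
\endcode
*/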
18860 
18861 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
18862  VmaAllocator allocator,
18863  VmaAllocation allocation,
18864  VkBuffer buffer)
18865 {
18866  VMA_ASSERT(allocator && allocation && buffer);
18867 
18868  VMA_DEBUG_LOG("vmaBindBufferMemory");
18869 
18870  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18871 
18872  return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
18873 }
18874 
18875 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
18876  VmaAllocator allocator,
18877  VmaAllocation allocation,
18878  VkDeviceSize allocationLocalOffset,
18879  VkBuffer buffer,
18880  const void* pNext)
18881 {
18882  VMA_ASSERT(allocator && allocation && buffer);
18883 
18884  VMA_DEBUG_LOG("vmaBindBufferMemory2");
18885 
18886  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18887 
18888  return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
18889 }
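/*
The "2"-suffixed variant serves manual sub-allocation: binding at a non-zero
offset inside memory obtained from vmaAllocateMemory(), optionally with a
pNext chain attached to VkBindBufferMemoryInfoKHR (which requires the
VK_KHR_bind_memory2 extension or Vulkan >= 1.1). A sketch, assuming
`localOffset` honors the buffer's alignment requirement:

\code
VkResult res = vmaBindBufferMemory2(
    allocator, allocation,
    localOffset, // offset relative to the beginning of `allocation`
    buffer,
    VMA_NULL); // pNext
\endcode
*/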
18890 
18891 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
18892  VmaAllocator allocator,
18893  VmaAllocation allocation,
18894  VkImage image)
18895 {
18896  VMA_ASSERT(allocator && allocation && image);
18897 
18898  VMA_DEBUG_LOG("vmaBindImageMemory");
18899 
18900  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18901 
18902  return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
18903 }
18904 
18905 VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
18906  VmaAllocator allocator,
18907  VmaAllocation allocation,
18908  VkDeviceSize allocationLocalOffset,
18909  VkImage image,
18910  const void* pNext)
18911 {
18912  VMA_ASSERT(allocator && allocation && image);
18913 
18914  VMA_DEBUG_LOG("vmaBindImageMemory2");
18915 
18916  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18917 
18918  return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
18919 }
18920 
18921 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
18922  VmaAllocator allocator,
18923  const VkBufferCreateInfo* pBufferCreateInfo,
18924  const VmaAllocationCreateInfo* pAllocationCreateInfo,
18925  VkBuffer* pBuffer,
18926  VmaAllocation* pAllocation,
18927  VmaAllocationInfo* pAllocationInfo)
18928 {
18929  VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
18930 
18931  if(pBufferCreateInfo->size == 0)
18932  {
18933  return VK_ERROR_VALIDATION_FAILED_EXT;
18934  }
18935  if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
18936  !allocator->m_UseKhrBufferDeviceAddress)
18937  {
18938  VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
18939  return VK_ERROR_VALIDATION_FAILED_EXT;
18940  }
18941 
18942  VMA_DEBUG_LOG("vmaCreateBuffer");
18943 
18944  VMA_DEBUG_GLOBAL_MUTEX_LOCK
18945 
18946  *pBuffer = VK_NULL_HANDLE;
18947  *pAllocation = VK_NULL_HANDLE;
18948 
18949  // 1. Create VkBuffer.
18950  VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
18951  allocator->m_hDevice,
18952  pBufferCreateInfo,
18953  allocator->GetAllocationCallbacks(),
18954  pBuffer);
18955  if(res >= 0)
18956  {
18957  // 2. vkGetBufferMemoryRequirements.
18958  VkMemoryRequirements vkMemReq = {};
18959  bool requiresDedicatedAllocation = false;
18960  bool prefersDedicatedAllocation = false;
18961  allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
18962  requiresDedicatedAllocation, prefersDedicatedAllocation);
18963 
18964  // 3. Allocate memory using allocator.
18965  res = allocator->AllocateMemory(
18966  vkMemReq,
18967  requiresDedicatedAllocation,
18968  prefersDedicatedAllocation,
18969  *pBuffer, // dedicatedBuffer
18970  pBufferCreateInfo->usage, // dedicatedBufferUsage
18971  VK_NULL_HANDLE, // dedicatedImage
18972  *pAllocationCreateInfo,
18973  VMA_SUBALLOCATION_TYPE_BUFFER,
18974  1, // allocationCount
18975  pAllocation);
18976 
18977 #if VMA_RECORDING_ENABLED
18978  if(allocator->GetRecorder() != VMA_NULL)
18979  {
18980  allocator->GetRecorder()->RecordCreateBuffer(
18981  allocator->GetCurrentFrameIndex(),
18982  *pBufferCreateInfo,
18983  *pAllocationCreateInfo,
18984  *pAllocation);
18985  }
18986 #endif
18987 
18988  if(res >= 0)
18989  {
18990  // 4. Bind buffer with memory.
18991  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
18992  {
18993  res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
18994  }
18995  if(res >= 0)
18996  {
18997  // All steps succeeded.
18998  #if VMA_STATS_STRING_ENABLED
18999  (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
19000  #endif
19001  if(pAllocationInfo != VMA_NULL)
19002  {
19003  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19004  }
19005 
19006  return VK_SUCCESS;
19007  }
19008  allocator->FreeMemory(
19009  1, // allocationCount
19010  pAllocation);
19011  *pAllocation = VK_NULL_HANDLE;
19012  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19013  *pBuffer = VK_NULL_HANDLE;
19014  return res;
19015  }
19016  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
19017  *pBuffer = VK_NULL_HANDLE;
19018  return res;
19019  }
19020  return res;
19021 }
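/*
A typical call site, assuming only that `allocator` was created earlier; this
follows the quick-start pattern from this library's documentation:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkBuffer buf = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
    &buf, &alloc, VMA_NULL);
// ...
vmaDestroyBuffer(allocator, buf, alloc); // destroys the buffer and frees its memory
\endcode
*/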
19022 
19023 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
19024  VmaAllocator allocator,
19025  VkBuffer buffer,
19026  VmaAllocation allocation)
19027 {
19028  VMA_ASSERT(allocator);
19029 
19030  if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19031  {
19032  return;
19033  }
19034 
19035  VMA_DEBUG_LOG("vmaDestroyBuffer");
19036 
19037  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19038 
19039 #if VMA_RECORDING_ENABLED
19040  if(allocator->GetRecorder() != VMA_NULL)
19041  {
19042  allocator->GetRecorder()->RecordDestroyBuffer(
19043  allocator->GetCurrentFrameIndex(),
19044  allocation);
19045  }
19046 #endif
19047 
19048  if(buffer != VK_NULL_HANDLE)
19049  {
19050  (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
19051  }
19052 
19053  if(allocation != VK_NULL_HANDLE)
19054  {
19055  allocator->FreeMemory(
19056  1, // allocationCount
19057  &allocation);
19058  }
19059 }
19060 
19061 VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
19062  VmaAllocator allocator,
19063  const VkImageCreateInfo* pImageCreateInfo,
19064  const VmaAllocationCreateInfo* pAllocationCreateInfo,
19065  VkImage* pImage,
19066  VmaAllocation* pAllocation,
19067  VmaAllocationInfo* pAllocationInfo)
19068 {
19069  VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
19070 
19071  if(pImageCreateInfo->extent.width == 0 ||
19072  pImageCreateInfo->extent.height == 0 ||
19073  pImageCreateInfo->extent.depth == 0 ||
19074  pImageCreateInfo->mipLevels == 0 ||
19075  pImageCreateInfo->arrayLayers == 0)
19076  {
19077  return VK_ERROR_VALIDATION_FAILED_EXT;
19078  }
19079 
19080  VMA_DEBUG_LOG("vmaCreateImage");
19081 
19082  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19083 
19084  *pImage = VK_NULL_HANDLE;
19085  *pAllocation = VK_NULL_HANDLE;
19086 
19087  // 1. Create VkImage.
19088  VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
19089  allocator->m_hDevice,
19090  pImageCreateInfo,
19091  allocator->GetAllocationCallbacks(),
19092  pImage);
19093  if(res >= 0)
19094  {
19095  VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
19096  VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
19097  VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
19098 
19099  // 2. Allocate memory using allocator.
19100  VkMemoryRequirements vkMemReq = {};
19101  bool requiresDedicatedAllocation = false;
19102  bool prefersDedicatedAllocation = false;
19103  allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
19104  requiresDedicatedAllocation, prefersDedicatedAllocation);
19105 
19106  res = allocator->AllocateMemory(
19107  vkMemReq,
19108  requiresDedicatedAllocation,
19109  prefersDedicatedAllocation,
19110  VK_NULL_HANDLE, // dedicatedBuffer
19111  UINT32_MAX, // dedicatedBufferUsage
19112  *pImage, // dedicatedImage
19113  *pAllocationCreateInfo,
19114  suballocType,
19115  1, // allocationCount
19116  pAllocation);
19117 
19118 #if VMA_RECORDING_ENABLED
19119  if(allocator->GetRecorder() != VMA_NULL)
19120  {
19121  allocator->GetRecorder()->RecordCreateImage(
19122  allocator->GetCurrentFrameIndex(),
19123  *pImageCreateInfo,
19124  *pAllocationCreateInfo,
19125  *pAllocation);
19126  }
19127 #endif
19128 
19129  if(res >= 0)
19130  {
19131  // 3. Bind image with memory.
19132  if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
19133  {
19134  res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
19135  }
19136  if(res >= 0)
19137  {
19138  // All steps succeeded.
19139  #if VMA_STATS_STRING_ENABLED
19140  (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
19141  #endif
19142  if(pAllocationInfo != VMA_NULL)
19143  {
19144  allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
19145  }
19146 
19147  return VK_SUCCESS;
19148  }
19149  allocator->FreeMemory(
19150  1, // allocationCount
19151  pAllocation);
19152  *pAllocation = VK_NULL_HANDLE;
19153  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19154  *pImage = VK_NULL_HANDLE;
19155  return res;
19156  }
19157  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
19158  *pImage = VK_NULL_HANDLE;
19159  return res;
19160  }
19161  return res;
19162 }
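/*
The image path is used analogously, assuming `allocator` exists:

\code
VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imgCreateInfo.extent.width = 1024;
imgCreateInfo.extent.height = 1024;
imgCreateInfo.extent.depth = 1;
imgCreateInfo.mipLevels = 1;
imgCreateInfo.arrayLayers = 1;
imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;

VkImage img = VK_NULL_HANDLE;
VmaAllocation alloc = VK_NULL_HANDLE;
VkResult res = vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo,
    &img, &alloc, VMA_NULL);
// ...
vmaDestroyImage(allocator, img, alloc);
\endcode
*/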
19163 
19164 VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
19165  VmaAllocator allocator,
19166  VkImage image,
19167  VmaAllocation allocation)
19168 {
19169  VMA_ASSERT(allocator);
19170 
19171  if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
19172  {
19173  return;
19174  }
19175 
19176  VMA_DEBUG_LOG("vmaDestroyImage");
19177 
19178  VMA_DEBUG_GLOBAL_MUTEX_LOCK
19179 
19180 #if VMA_RECORDING_ENABLED
19181  if(allocator->GetRecorder() != VMA_NULL)
19182  {
19183  allocator->GetRecorder()->RecordDestroyImage(
19184  allocator->GetCurrentFrameIndex(),
19185  allocation);
19186  }
19187 #endif
19188 
19189  if(image != VK_NULL_HANDLE)
19190  {
19191  (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
19192  }
19193  if(allocation != VK_NULL_HANDLE)
19194  {
19195  allocator->FreeMemory(
19196  1, // allocationCount
19197  &allocation);
19198  }
19199 }
19200 
19201 #endif // #ifdef VMA_IMPLEMENTATION